hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6d9a5cea1d904efba5aba12767ba1e5edfa15c6
| 72
|
py
|
Python
|
perf/vendor/shaded_pyflakes/__init__.py
|
RDIL/Perf
|
a1cbd1ea4f93b311557a6349fc5d731fcfc36810
|
[
"MIT"
] | 1
|
2020-01-17T13:50:40.000Z
|
2020-01-17T13:50:40.000Z
|
perf/vendor/shaded_pyflakes/__init__.py
|
RDIL/PyPerf
|
a1cbd1ea4f93b311557a6349fc5d731fcfc36810
|
[
"MIT"
] | 1
|
2020-02-05T16:13:51.000Z
|
2020-02-07T17:04:21.000Z
|
perf/vendor/shaded_pyflakes/__init__.py
|
RDIL/Perf
|
a1cbd1ea4f93b311557a6349fc5d731fcfc36810
|
[
"MIT"
] | null | null | null |
__version__ = "snapshot-2.1.1-be88036019005b769596ca82fb7b82dfdffdca0f"
| 36
| 71
| 0.861111
| 6
| 72
| 9.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.376812
| 0.041667
| 72
| 1
| 72
| 72
| 0.463768
| 0
| 0
| 0
| 0
| 0
| 0.763889
| 0.763889
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e6e22ebb352b06dc0b48a04aa4a2f7a1db4b1766
| 37
|
py
|
Python
|
Python/.examination/BIN/BINARY.DAT.py
|
Smith-Br/BackEnd
|
710e54866b3d73cfa3ebd1c22ac661f00770ce88
|
[
"MIT"
] | 1
|
2021-12-22T08:58:49.000Z
|
2021-12-22T08:58:49.000Z
|
Python/.examination/BIN/BINARY.DAT.py
|
Smith-Br/Python
|
710e54866b3d73cfa3ebd1c22ac661f00770ce88
|
[
"MIT"
] | null | null | null |
Python/.examination/BIN/BINARY.DAT.py
|
Smith-Br/Python
|
710e54866b3d73cfa3ebd1c22ac661f00770ce88
|
[
"MIT"
] | null | null | null |
binary_data = 101011111100000011111
| 12.333333
| 35
| 0.864865
| 3
| 37
| 10.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.636364
| 0.108108
| 37
| 2
| 36
| 18.5
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc0a67a9f21dc9b72cf10089ee2e4899639f2b78
| 95
|
py
|
Python
|
boa3_test/test_sc/built_in_methods_test/IsInstanceBoolLiteral.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/built_in_methods_test/IsInstanceBoolLiteral.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/built_in_methods_test/IsInstanceBoolLiteral.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from boa3.builtin import public
@public
def Main() -> bool:
return isinstance(123, bool)
| 13.571429
| 32
| 0.705263
| 13
| 95
| 5.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 0.189474
| 95
| 6
| 33
| 15.833333
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
fc308d44ba35b0cfe7f705d58dd365193de052b0
| 81,914
|
py
|
Python
|
graphql_compiler/tests/snapshot_tests/test_query_pagination.py
|
kensho-technologies/graphql-compiler
|
4318443b7b2512a059f3616112bfc40bbf8eec06
|
[
"Apache-2.0"
] | 521
|
2017-07-18T23:56:25.000Z
|
2022-03-25T16:39:06.000Z
|
graphql_compiler/tests/snapshot_tests/test_query_pagination.py
|
kensho-technologies/graphql-compiler
|
4318443b7b2512a059f3616112bfc40bbf8eec06
|
[
"Apache-2.0"
] | 740
|
2017-07-19T01:52:42.000Z
|
2021-09-30T11:15:00.000Z
|
graphql_compiler/tests/snapshot_tests/test_query_pagination.py
|
kensho-technologies/graphql-compiler
|
4318443b7b2512a059f3616112bfc40bbf8eec06
|
[
"Apache-2.0"
] | 56
|
2017-07-18T23:56:14.000Z
|
2021-10-30T08:08:56.000Z
|
# Copyright 2019-present Kensho Technologies, LLC.
import datetime
from typing import Tuple
import unittest
from graphql import print_ast
import pytest
from .. import test_input_data
from ...cost_estimation.analysis import analyze_query_string
from ...cost_estimation.statistics import LocalStatistics
from ...exceptions import GraphQLInvalidArgumentError
from ...global_utils import QueryStringWithParameters
from ...query_pagination import paginate_query
from ...query_pagination.pagination_planning import (
InsufficientQuantiles,
MissingClassCount,
PaginationAdvisory,
PaginationPlan,
VertexPartitionPlan,
get_pagination_plan,
)
from ...query_pagination.parameter_generator import (
_choose_parameter_values,
generate_parameters_for_vertex_partition,
)
from ...query_pagination.query_parameterizer import generate_parameterized_queries
from ...schema.schema_info import EdgeConstraint, QueryPlanningSchemaInfo, UUIDOrdering
from ...schema_generation.graphql_schema import get_graphql_schema_from_schema_graph
from ..test_helpers import compare_graphql, generate_schema_graph, get_function_names_from_module
from ..test_input_data import CommonTestData
# The following TestCase class uses the 'snapshot_orientdb_client' fixture
# which pylint does not recognize as a class member.
# pylint: disable=no-member
@pytest.mark.slow
class QueryPaginationTests(unittest.TestCase):
"""Test the query pagination module."""
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_basic(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
expected_plan = PaginationPlan((VertexPartitionPlan(("Animal",), "uuid", number_of_pages),))
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_invalid_extra_args(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{"country": "USA"},
)
with self.assertRaises(GraphQLInvalidArgumentError):
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
get_pagination_plan(analysis, number_of_pages)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_invalid_missing_args(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
@filter(op_name: "=", value: ["$animal_name"])
}
}""",
{},
)
with self.assertRaises(GraphQLInvalidArgumentError):
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
get_pagination_plan(analysis, number_of_pages)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_unique_filter(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "=", value: ["$animal_uuid"])
name @output(out_name: "animal_name")
}
}""",
{
"animal_uuid": "40000000-0000-0000-0000-000000000000",
},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
# This is a white box test. We check that we don't paginate on the root when it has a
# unique filter on it. A better plan is to paginate on a different vertex, but that is
# not implemented.
expected_plan = PaginationPlan(tuple())
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_unique_filter_on_many_to_one(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {
"Animal": 1000,
"Animal_FedAt": 10000000,
"FeedingEvent": 100000,
}
statistics = LocalStatistics(class_counts)
edge_constraints = {"Animal_ParentOf": EdgeConstraint.AtMostOneSource}
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
edge_constraints=edge_constraints,
)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
out_Animal_ParentOf {
uuid @filter(op_name: "=", value: ["$animal_uuid"])
}
out_Animal_FedAt {
name @output(out_name: "feeding_event_name")
}
}
}""",
{
"animal_uuid": "40000000-0000-0000-0000-000000000000",
},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
# This is a white box test. There's a filter on the child, which narrows down
# the number of possible roots down to 1. This makes the root a bad pagination
# vertex. Ideally, we'd paginate on the FeedingEvent node, but that's not implemented.
expected_plan = PaginationPlan(tuple())
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_on_int(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts, field_quantiles={("Species", "limbs"): list(range(100))}
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the paginator generates a plan paginating on an int field
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
expected_plan = PaginationPlan(
(VertexPartitionPlan(("Species",), "limbs", number_of_pages),)
)
expected_advisories: Tuple[PaginationAdvisory, ...] = ()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_on_int_error(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
class_counts = {"Species": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the paginator detects a lack of quantile data for Species.limbs
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
expected_plan = PaginationPlan(tuple())
expected_advisories = (InsufficientQuantiles("Species", "limbs", 0, 51),)
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
# TODO: These tests can be sped up by having an existing test SchemaGraph object.
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_basic_pagination(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 4,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 1)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
expected_first = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
},
)
# Check that the correct first page and remainder are generated
compare_graphql(self, expected_first.query_string, first.query_string)
self.assertEqual(expected_first.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the first page is estimated to fit into a page
first_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, first_page_cardinality_estimate)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 1)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_second = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
"__paged_param_1": "80000000-0000-0000-0000-000000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_1": "80000000-0000-0000-0000-000000000000",
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_second.query_string, second.query_string)
self.assertEqual(expected_second.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the second page is estimated to fit into a page
second_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, second_page_cardinality_estimate)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_basic_pagination_mssql_uuids(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 4,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 1)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
expected_first = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
},
)
# Check that the correct first page and remainder are generated
compare_graphql(self, expected_first.query_string, first.query_string)
self.assertEqual(expected_first.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the first page is estimated to fit into a page
first_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, first_page_cardinality_estimate)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 1)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_second = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
"__paged_param_1": "00000000-0000-0000-0000-800000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_1": "00000000-0000-0000-0000-800000000000",
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_second.query_string, second.query_string)
self.assertEqual(expected_second.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the second page is estimated to fit into a page
second_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, second_page_cardinality_estimate)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_datetime(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Event"] = ("event_date",) # Force pagination on datetime field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Event": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
}
}""",
{},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# There are 1000 dates uniformly spread out between year 2000 and 3000, so to get
# 100 results, we stop at 2010.
expected_page_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, first.query_string)
self.assertEqual(expected_page_query.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 100)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_page_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "event_name")
}
}""",
{
# TODO parameters seem wonky
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
"__paged_param_1": datetime.datetime(2019, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_1": datetime.datetime(2019, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, second.query_string)
self.assertEqual(expected_page_query.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_datetime_existing_filter(self):
    """Paginate on a datetime field when the query already filters that field.

    The pagination planner may pick either "uuid" or "event_date" for Event;
    the expected queries below assume it picks event_date, reusing the user's
    existing ">=" filter as the page's lower bound.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    # We allow pagination on uuid as well and leave it to the pagination planner to decide to
    # paginate on event_date to prevent empty pages if the two fields are correlated.
    pagination_keys["Event"] = ("uuid", "event_date")
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Event": 1000}
    # Quantiles put one boundary per year, uniformly covering 2000-2100.
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    local_datetime = datetime.datetime(2050, 1, 1, 0, 0)
    query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$date_lower"])
            }
        }""",
        {"date_lower": local_datetime},
    )
    first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # There are 1000 dates uniformly spread out between year 2000 and 2100, so to get
    # roughly 100 results after 2050, we stop at 2059 (cut to a quantile boundary).
    expected_page_query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$date_lower"])
                           @filter(op_name: "<", value: ["$__paged_param_0"])
            }
        }""",
        {
            "date_lower": local_datetime,
            "__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
        },
    )
    expected_remainder_query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
            }
        }""",
        {
            "__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
        },
    )
    # Check that the correct queries are generated
    compare_graphql(self, expected_page_query.query_string, first.query_string)
    self.assertEqual(expected_page_query.parameters, first.parameters)
    self.assertEqual(1, len(remainder))
    compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
    self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_existing_datetime_filter(self):
    """Paginate when pagination is forced onto a datetime field with a user filter.

    Unlike the test above, only event_date is offered as a pagination key here,
    so the planner has no choice of field.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Event"] = ("event_date",)  # Force pagination on datetime field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Event": 1000}
    # Quantiles put one boundary per year, uniformly covering 2000-2100.
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$date_lower"])
            }
        }""",
        {"date_lower": datetime.datetime(2050, 1, 1, 0, 0)},
    )
    first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # NOTE(review): an earlier comment here described a tz-aware filter applied to a
    # tz-naive field, but "$date_lower" above is tz-naive like the quantile statistics.
    # As written, this checks that the [2050, 2100) remainder is cut at 2059 so the
    # first page holds roughly 100 of the 1000 events — TODO confirm original intent.
    expected_page_query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$date_lower"])
                           @filter(op_name: "<", value: ["$__paged_param_0"])
            }
        }""",
        {
            "date_lower": datetime.datetime(2050, 1, 1, 0, 0),
            "__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
        },
    )
    expected_remainder_query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
                event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
            }
        }""",
        {
            "__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
        },
    )
    # Check that the correct queries are generated
    compare_graphql(self, expected_page_query.query_string, first.query_string)
    self.assertEqual(expected_page_query.parameters, first.parameters)
    self.assertEqual(1, len(remainder))
    compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
    self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int(self):
    """Generate page-boundary values for an int field with uniform quantiles.

    With quantiles 0..100 and a 4-way partition, the interior cut points are
    the 25th, 50th and 75th percentiles.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            # list(range(...)) instead of a copying comprehension (ruff C416).
            ("Species", "limbs"): list(range(101)),
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [25, 50, 75]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_few_quantiles(self):
    """Generate page boundaries when only a handful of quantiles are known.

    With just four quantile values and a 3-way partition, the generator can
    only fall back to the interior quantiles themselves (10 and 20).
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 10000000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Species", "limbs"): [
                0,
                10,
                20,
                30,
            ],
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [10, 20]
    self.assertEqual(expected_parameters, list(generated_parameters))
def test_choose_parameter_values(self):
    """Exercise _choose_parameter_values over quantile lists of varying length.

    Each case is (available quantile values, desired partition count,
    expected chosen cut points); duplicates and over-large partition counts
    must degrade gracefully rather than error.
    """
    cases = [
        ([1], 2, [1]),
        ([1], 3, [1]),
        ([1, 1], 3, [1]),
        ([1, 3], 2, [3]),
        ([1, 3], 3, [1, 3]),
        ([1, 3], 4, [1, 3]),
        ([1, 3, 5], 2, [3]),
        ([1, 3, 5], 3, [3, 5]),
        ([1, 3, 5], 4, [1, 3, 5]),
        ([1, 3, 5], 5, [1, 3, 5]),
    ]
    for values, desired_count, expected in cases:
        self.assertEqual(expected, list(_choose_parameter_values(values, desired_count)))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filters(self):
    """Generate page boundaries respecting an existing ">=" filter.

    The ">= 25" filter confines the paginated range to [25, 100], so a 3-way
    partition cuts at 50 and 75.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            # list(range(...)) instead of a copying comprehension (ruff C416).
            ("Species", "limbs"): list(range(101)),
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                limbs @filter(op_name: ">=", value: ["$limbs_lower"])
            }
        }""",
        {"limbs_lower": 25},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [50, 75]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filter_tiny_page(self):
    """First generated boundary must advance past an existing ">=" filter value.

    Only the first yielded parameter is inspected: it must be strictly greater
    than the filter's lower bound of 10 so the first page is non-degenerate.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={("Species", "limbs"): list(range(0, 101, 10))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                limbs @filter(op_name: ">=", value: ["$limbs_lower"])
            }
        }""",
        {"limbs_lower": 10},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 10)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    first_parameter = next(generated_parameters)
    self.assertTrue(first_parameter > 10)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filters_2(self):
    """Generate page boundaries respecting an existing "<" filter.

    The "< 76" filter confines the paginated range to roughly [0, 75], so a
    3-way partition cuts at 25 and 50.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            # list(range(...)) instead of a copying comprehension (ruff C416).
            ("Species", "limbs"): list(range(101)),
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                limbs @filter(op_name: "<", value: ["$limbs_upper"])
            }
        }""",
        {"limbs_upper": 76},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [25, 50]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_inline_fragment(self):
    """Generate page boundaries for a vertex reached through an inline fragment.

    The partition plan targets the "Species" type inside
    "out_Entity_Related { ... on Species }", exercising path resolution
    through type coercions.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            # list(range(...)) instead of a copying comprehension (ruff C416).
            ("Species", "limbs"): list(range(101)),
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                out_Entity_Related {
                    ... on Species {
                        name @output(out_name: "species_name")
                    }
                }
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species", "out_Entity_Related"), "limbs", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [25, 50, 75]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_with_existing_filters(self):
    """Generate page boundaries with a "<" filter over coarse (step-10) quantiles."""
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts, field_quantiles={("Species", "limbs"): list(range(0, 1001, 10))}
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                limbs @filter(op_name: "<", value: ["$num_limbs"])
                name @output(out_name: "species_name")
            }
        }""",
        {"num_limbs": 505},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    # The "< 505" filter keeps roughly the first half of the 0..1000 quantile range
    # (about 51 of the 101 step-10 quantiles). Splitting that remainder into 4 even
    # chunks lands on the quantiles at values 130, 260 and 390.
    # TODO(review): confirm against the generator's bisect logic — the previous
    # comment here ("XXX ... see if bisect_left logic is correct") questioned it.
    expected_parameters = [130, 260, 390]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_datetime(self):
    """Generate page boundaries for a datetime field with uniform yearly quantiles.

    A 4-way partition of the 2000-2100 range cuts at 2025, 2050 and 2075.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Event"] = ("event_date",)  # Force pagination on datetime field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Event": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Event {
                name @output(out_name: "event_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Event",), "event_date", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [
        datetime.datetime(2025, 1, 1, 0, 0),
        datetime.datetime(2050, 1, 1, 0, 0),
        datetime.datetime(2075, 1, 1, 0, 0),
    ]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_uuid(self):
    """Generate page boundaries for a uuid4 field with left-to-right ordering.

    Without quantile statistics, uuid4 values are assumed uniform over the
    128-bit space, so a 4-way split cuts at 0x40..., 0x80... and 0xc0....
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Animal": 1000}
    statistics = LocalStatistics(class_counts)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Animal {
                name @output(out_name: "animal_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [
        "40000000-0000-0000-0000-000000000000",
        "80000000-0000-0000-0000-000000000000",
        "c0000000-0000-0000-0000-000000000000",
    ]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_mssql_uuid(self):
    """Generate page boundaries for uuids under MSSQL's LastSixBytesFirst ordering.

    MSSQL sorts uniqueidentifiers by the last six bytes first, so the split
    points vary in the final group instead of the first.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Animal": 1000}
    statistics = LocalStatistics(class_counts)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Animal {
                name @output(out_name: "animal_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [
        "00000000-0000-0000-0000-400000000000",
        "00000000-0000-0000-0000-800000000000",
        "00000000-0000-0000-0000-c00000000000",
    ]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_mssql_uuid_with_existing_filter(self):
    """MSSQL-ordered uuid boundaries must respect an existing ">=" uuid filter.

    The filter restricts the range to the upper half (last-six-bytes >= 0x80...),
    so a 4-way split of that half cuts at 0xa0..., 0xc0... and 0xe0....
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Animal": 1000}
    statistics = LocalStatistics(class_counts)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Animal {
                uuid @filter(op_name: ">=", value: ["$uuid_lower"])
                name @output(out_name: "animal_name")
            }
        }""",
        {
            "uuid_lower": "00000000-0000-0000-0000-800000000000",
        },
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    expected_parameters = [
        "00000000-0000-0000-0000-a00000000000",
        "00000000-0000-0000-0000-c00000000000",
        "00000000-0000-0000-0000-e00000000000",
    ]
    self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_consecutive(self):
    """Generated boundaries must be distinct even with heavily skewed quantiles.

    The quantile list repeats 0 a thousand times, so naive even splitting would
    yield the same cut point repeatedly; the generator must deduplicate.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
    # Check that there are no duplicates
    list_parameters = list(generated_parameters)
    self.assertEqual(len(list_parameters), len(set(list_parameters)))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer(self):
    """generate_parameterized_queries adds complementary "<" / ">=" filters.

    The page query gets "limbs < $__paged_param_0" and the remainder query the
    complementary "limbs >= $__paged_param_0", partitioning the result set.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
            }
        }""",
        {},
    )
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    analysis = analyze_query_string(schema_info, query)
    # 100 is the concrete parameter value used for the page/remainder split.
    next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
    expected_next_page = """{
        Species {
            limbs @filter(op_name: "<", value: ["$__paged_param_0"])
            name @output(out_name: "species_name")
        }
    }"""
    expected_remainder = """{
        Species {
            limbs @filter(op_name: ">=", value: ["$__paged_param_0"])
            name @output(out_name: "species_name")
        }
    }"""
    compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
    compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer_name_conflict(self):
    """Generated parameter names must not collide with existing "__paged_param" names.

    The input query already uses "$__paged_param_0", so the parameterizer must
    pick "$__paged_param_1" for its own filters.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                     @filter(op_name: "!=", value: ["$__paged_param_0"])
            }
        }""",
        {"__paged_param_0": "Cow"},
    )
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    analysis = analyze_query_string(schema_info, query)
    next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
    expected_next_page = """{
        Species {
            limbs @filter(op_name: "<", value: ["$__paged_param_1"])
            name @output(out_name: "species_name")
                 @filter(op_name: "!=", value: ["$__paged_param_0"])
        }
    }"""
    expected_remainder = """{
        Species {
            limbs @filter(op_name: ">=", value: ["$__paged_param_1"])
            name @output(out_name: "species_name")
                 @filter(op_name: "!=", value: ["$__paged_param_0"])
        }
    }"""
    compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
    compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer_filter_deduplication(self):
    """The remainder query replaces a redundant user ">=" filter instead of stacking.

    The page query keeps the user's ">= $limbs_more_than" and adds the upper
    bound; the remainder query's ">= $__paged_param_0" subsumes the user filter,
    which is therefore dropped rather than duplicated.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                limbs @filter(op_name: ">=", value: ["$limbs_more_than"])
                name @output(out_name: "species_name")
            }
        }""",
        {
            "limbs_more_than": 100,
        },
    )
    vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
    analysis = analyze_query_string(schema_info, query)
    next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
    expected_next_page = """{
        Species {
            limbs @filter(op_name: ">=", value: ["$limbs_more_than"])
                  @filter(op_name: "<", value: ["$__paged_param_0"])
            name @output(out_name: "species_name")
        }
    }"""
    expected_remainder = """{
        Species {
            limbs @filter(op_name: ">=", value: ["$__paged_param_0"])
            name @output(out_name: "species_name")
        }
    }"""
    compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
    compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_no_pagination(self):
    """Ensure pagination is not done when not needed.

    With only 4 Animal vertices and a page size of 10, the original query is
    returned unchanged and the remainder is empty.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    original_query = QueryStringWithParameters(
        """{
            Animal {
                name @output(out_name: "animal")
            }
        }""",
        {},
    )
    count_data = {
        "Animal": 4,
    }
    statistics = LocalStatistics(count_data)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    first_page_and_remainder, _ = paginate_query(schema_info, original_query, 10)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # No pagination necessary
    compare_graphql(self, original_query.query_string, first.query_string)
    self.assertEqual(original_query.parameters, first.parameters)
    self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination(self):
    """Ensure no unwanted error is raised when pagination is needed but stats are missing.

    With no pagination keys at all, the planner cannot split the query and must
    return it whole with an empty remainder instead of raising.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {}  # No pagination keys, so the planner has no options
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    original_query = QueryStringWithParameters(
        """{
            Animal {
                name @output(out_name: "animal")
            }
        }""",
        {},
    )
    count_data = {
        "Animal": 100000,
    }
    statistics = LocalStatistics(count_data)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    first_page_and_remainder, _ = paginate_query(schema_info, original_query, 10)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # Query should be split, but there's no viable pagination method.
    compare_graphql(self, original_query.query_string, first.query_string)
    self.assertEqual(original_query.parameters, first.parameters)
    self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination_strong_filters_few_repeated_quantiles(self):
    """A narrow "between" filter over coarse quantiles cannot be paginated.

    The quantiles only resolve multiples of 10 (each repeated 10000 times),
    so the [10, 14] window contains no usable interior cut point and the
    query is returned whole.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000000000000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Species", "limbs"): list(i for i in range(0, 101, 10) for _ in range(10000))
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                limbs @filter(op_name: "between", value: ["$limbs_lower", "$limbs_upper"])
            }
        }""",
        {
            "limbs_lower": 10,
            "limbs_upper": 14,
        },
    )
    first_page_and_remainder, _ = paginate_query(schema_info, query, 10)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # Query should be split, but there's not enough quantiles
    compare_graphql(self, query.query_string, first.query_string)
    self.assertEqual(query.parameters, first.parameters)
    self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination_strong_filters_few_quantiles(self):
    """A narrow "between" filter with too few distinct quantiles cannot be paginated.

    NOTE(review): setup and expectations here are identical to
    test_impossible_pagination_strong_filters_few_repeated_quantiles above —
    despite the name, this quantile list also repeats each value 10000 times.
    Consider deduplicating the tests or making this one use a genuinely short,
    non-repeated quantile list.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000000000000}
    statistics = LocalStatistics(
        class_counts,
        field_quantiles={
            ("Species", "limbs"): list(i for i in range(0, 101, 10) for _ in range(10000))
        },
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                name @output(out_name: "species_name")
                limbs @filter(op_name: "between", value: ["$limbs_lower", "$limbs_upper"])
            }
        }""",
        {
            "limbs_lower": 10,
            "limbs_upper": 14,
        },
    )
    first_page_and_remainder, _ = paginate_query(schema_info, query, 10)
    first = first_page_and_remainder.one_page
    remainder = first_page_and_remainder.remainder
    # Query should be split, but there's not enough quantiles
    compare_graphql(self, query.query_string, first.query_string)
    self.assertEqual(query.parameters, first.parameters)
    self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_with_compiler_tests(self):
    """Test that pagination doesn't crash on any of the queries from the compiler tests."""
    schema_graph = generate_schema_graph(self.orientdb_client)
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    # Uniform counts for every vertex and edge class, so any compiler-test
    # query has enough statistics to be considered for pagination.
    count_data = {vertex_name: 100 for vertex_name in schema_graph.vertex_class_names}
    count_data.update({edge_name: 100 for edge_name in schema_graph.edge_class_names})
    statistics = LocalStatistics(count_data)
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )

    # One representative runtime value per GraphQL argument type, used to
    # fabricate concrete parameters for each test query.
    arbitrary_value_for_type = {
        "String": "string_1",
        "ID": "40000000-0000-0000-0000-000000000000",
        "Int": 5,
        "Date": datetime.date(2000, 1, 1),
        "DateTime": datetime.datetime(2000, 1, 1),
        "Decimal": 5.3,
        "[String]": ["string_1", "string_2"],
    }

    # Reflect over the compiler's test-data module: each zero-argument
    # function annotated to return CommonTestData supplies one query.
    for test_name in get_function_names_from_module(test_input_data):
        method = getattr(test_input_data, test_name)
        if hasattr(method, "__annotations__"):
            output_type = method.__annotations__.get("return")
            if output_type == CommonTestData:
                test_data = method()
                query = test_data.graphql_input
                args = {
                    arg_name: arbitrary_value_for_type[str(arg_type)]
                    for arg_name, arg_type in test_data.expected_input_metadata.items()
                }
                # Success criterion is simply "does not raise".
                paginate_query(schema_info, QueryStringWithParameters(query, args), 10)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_vertex_class_count(self) -> None:
    """Paginating with no class counts at all must advise about the root vertex."""
    schema_graph = generate_schema_graph(self.orientdb_client)  # type: ignore # from fixture
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    vertex_names = schema_graph.vertex_class_names
    pagination_keys = {name: ("uuid",) for name in vertex_names}
    uuid4_field_info = {name: {"uuid": UUIDOrdering.LeftToRight} for name in vertex_names}
    query = QueryStringWithParameters(
        """{
            Animal {
                name @output(out_name: "animal")
            }
        }""",
        {},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        # Deliberately empty statistics: no class counts are provided.
        statistics=LocalStatistics({}),
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )

    first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)

    # Without a count for "Animal" the planner cannot split the query, and it
    # must say so via a MissingClassCount advisory.
    self.assertEqual(tuple(), first_page_and_remainder.remainder)
    self.assertEqual((MissingClassCount("Animal"),), advisories)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_non_root_vertex_class_count(self) -> None:
    """A missing count for a traversed (non-root) vertex class must be advised."""
    schema_graph = generate_schema_graph(self.orientdb_client)  # type: ignore # from fixture
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    vertex_names = schema_graph.vertex_class_names
    pagination_keys = {name: ("uuid",) for name in vertex_names}
    uuid4_field_info = {name: {"uuid": UUIDOrdering.LeftToRight} for name in vertex_names}
    query = QueryStringWithParameters(
        """{
            Animal {
                out_Animal_LivesIn {
                    name @output(out_name: "animal")
                }
            }
        }""",
        {},
    )
    # Counts cover the root vertex and the edge, but not "Location",
    # the class reached through out_Animal_LivesIn.
    statistics = LocalStatistics({"Animal": 1000, "Animal_LivesIn": 1000})
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )

    first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)

    self.assertEqual(tuple(), first_page_and_remainder.remainder)
    self.assertEqual((MissingClassCount("Location"),), advisories)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_edge_class_count(self) -> None:
    """A missing count for a traversed edge class must be advised."""
    schema_graph = generate_schema_graph(self.orientdb_client)  # type: ignore # from fixture
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    vertex_names = schema_graph.vertex_class_names
    pagination_keys = {name: ("uuid",) for name in vertex_names}
    uuid4_field_info = {name: {"uuid": UUIDOrdering.LeftToRight} for name in vertex_names}
    query = QueryStringWithParameters(
        """{
            Animal {
                out_Animal_LivesIn {
                    name @output(out_name: "animal")
                }
            }
        }""",
        {},
    )
    # Counts cover both vertex classes but not the "Animal_LivesIn" edge.
    statistics = LocalStatistics({"Animal": 1000, "Location": 10000})
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )

    first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)

    self.assertEqual(tuple(), first_page_and_remainder.remainder)
    self.assertEqual((MissingClassCount("Animal_LivesIn"),), advisories)
@pytest.mark.xfail(strict=True, reason="inline fragment not supported", raises=Exception)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_with_inline_fragment(self) -> None:
    """Desired behavior for paginating inside an inline fragment (currently xfail).

    When inline-fragment support lands, partitioning on Species.limbs inside
    the "... on Species" fragment should add a "<" filter on limbs at the
    fragment scope; the expected query at the bottom pins that shape.
    """
    schema_graph = generate_schema_graph(self.orientdb_client)  # type: ignore # from fixture
    graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
    pagination_keys = {
        vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
    }
    pagination_keys["Species"] = ("limbs",)  # Force pagination on int field
    uuid4_field_info = {
        vertex_name: {"uuid": UUIDOrdering.LeftToRight}
        for vertex_name in schema_graph.vertex_class_names
    }
    class_counts = {"Species": 1000}
    statistics = LocalStatistics(
        class_counts,
        # Uniform quantiles 0..99; the midpoint (50) is the expected split.
        field_quantiles={("Species", "limbs"): list(range(100))},
    )
    schema_info = QueryPlanningSchemaInfo(
        schema=graphql_schema,
        type_equivalence_hints=type_equivalence_hints,
        schema_graph=schema_graph,
        statistics=statistics,
        pagination_keys=pagination_keys,
        uuid4_field_info=uuid4_field_info,
    )
    query = QueryStringWithParameters(
        """{
            Species {
                out_Entity_Related {
                    ... on Species {
                        name @output(out_name: "species_name")
                    }
                }
            }
        }""",
        {},
    )
    analysis = analyze_query_string(schema_info, query)
    # Split the fragment-scoped Species vertex into 2 pages on "limbs".
    vertex_partition_plan = VertexPartitionPlan(("Species", "out_Entity_Related"), "limbs", 2)
    generated_parameters = generate_parameters_for_vertex_partition(
        analysis, vertex_partition_plan
    )

    # Sentinel distinguishes "no parameter generated" from a falsy parameter.
    sentinel = object()
    first_param = next(generated_parameters, sentinel)
    self.assertEqual(50, first_param)  # midpoint of the uniform 0..99 quantiles

    page_query, _ = generate_parameterized_queries(analysis, vertex_partition_plan, first_param)
    expected_page_query_string = """{
        Species {
            out_Entity_Related {
                ... on Species {
                    limbs @filter(op_name: "<", value: ["$__paged_param_0"])
                    name @output(out_name: "species_name")
                }
            }
        }
    }"""
    compare_graphql(self, expected_page_query_string, print_ast(page_query.query_ast))
| 41.601828
| 100
| 0.62081
| 8,147
| 81,914
| 5.866454
| 0.046397
| 0.066975
| 0.045194
| 0.025966
| 0.914069
| 0.906767
| 0.900029
| 0.894882
| 0.888542
| 0.880403
| 0
| 0.027472
| 0.291208
| 81,914
| 1,968
| 101
| 41.622967
| 0.795711
| 0.052421
| 0
| 0.675316
| 0
| 0
| 0.075917
| 0.027704
| 0
| 0
| 0
| 0.001016
| 0.051231
| 1
| 0.024617
| false
| 0
| 0.011976
| 0
| 0.037259
| 0.005323
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc5343d0db36742ffe4755464b9e3772a0c6042a
| 170
|
py
|
Python
|
src/hist/axis/transform.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/hist/axis/transform.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/hist/axis/transform.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Re-export boost-histogram's axis-transform objects under this package's
# namespace; __all__ pins the public API to exactly these five names.
from boost_histogram.axis.transform import AxisTransform, Pow, Function, sqrt, log
__all__ = ("AxisTransform", "Pow", "Function", "sqrt", "log")
| 34
| 82
| 0.688235
| 20
| 170
| 5.6
| 0.75
| 0.285714
| 0.428571
| 0.5
| 0.553571
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006711
| 0.123529
| 170
| 4
| 83
| 42.5
| 0.744966
| 0.123529
| 0
| 0
| 0
| 0
| 0.210884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
fc7d9e8e172dec7aec454484b6b72503542fcb1d
| 176
|
py
|
Python
|
PySpace/re_1.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
PySpace/re_1.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
PySpace/re_1.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Filename: re_1.py
# Demo: re.match only matches at the *start* of the string.
import re
print(re.match('www','www.runoob.com').span()) # matches at the start of the string
print(re.match('com','www.runoob.com')) # not at the start, so this prints None
| 22
| 56
| 0.642045
| 29
| 176
| 3.862069
| 0.655172
| 0.125
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.096591
| 176
| 7
| 57
| 25.142857
| 0.685535
| 0.386364
| 0
| 0
| 0
| 0
| 0.330097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
fca0e8cb52068d02ffc35a1e8775aaae609faa31
| 28
|
py
|
Python
|
weldx/asdf/tags/weldx/unit/__init__.py
|
vhirtham/weldx
|
50d212e9755271d7299acac103f3f0a8f1390fd6
|
[
"BSD-3-Clause"
] | 13
|
2020-02-20T07:45:02.000Z
|
2021-12-10T13:15:47.000Z
|
weldx/asdf/tags/weldx/unit/__init__.py
|
vhirtham/weldx
|
50d212e9755271d7299acac103f3f0a8f1390fd6
|
[
"BSD-3-Clause"
] | 675
|
2020-02-20T07:47:00.000Z
|
2022-03-31T15:17:19.000Z
|
weldx/asdf/tags/weldx/unit/__init__.py
|
vhirtham/weldx
|
50d212e9755271d7299acac103f3f0a8f1390fd6
|
[
"BSD-3-Clause"
] | 5
|
2020-09-02T07:19:17.000Z
|
2021-12-05T08:57:50.000Z
|
from . import pint_quantity
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fca3c35367aafccf267ccffe86c3b8c90e1bf21f
| 29
|
py
|
Python
|
project_tw/output/martian_plot.py
|
terranandes/crawler
|
ed0ca2315f8e0bea3cccd9a74416f371afc02b22
|
[
"Apache-2.0"
] | 7
|
2021-03-15T10:27:42.000Z
|
2022-02-28T07:25:56.000Z
|
project_tw/output/martian_plot.py
|
terranandes/crawler
|
ed0ca2315f8e0bea3cccd9a74416f371afc02b22
|
[
"Apache-2.0"
] | null | null | null |
project_tw/output/martian_plot.py
|
terranandes/crawler
|
ed0ca2315f8e0bea3cccd9a74416f371afc02b22
|
[
"Apache-2.0"
] | 2
|
2021-03-30T08:38:19.000Z
|
2021-04-06T12:06:03.000Z
|
import bar_chart_race as bcr
| 14.5
| 28
| 0.862069
| 6
| 29
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d9d64fd4e5e5732abbe45c2912b99ff9f22137a
| 2,274
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/B_40_13_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_40_13_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_40_13_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_40_13_9 = {0: {'A': 0.294, 'C': -0.152, 'E': 0.064, 'D': -0.054, 'G': 0.217, 'F': -0.489, 'I': 0.242, 'H': -0.381, 'K': -0.374, 'M': 0.53, 'L': 0.413, 'N': -0.044, 'Q': 0.172, 'P': 0.003, 'S': -0.112, 'R': -0.322, 'T': -0.018, 'W': -0.43, 'V': 0.217, 'Y': 0.224}, 1: {'A': 0.119, 'C': -0.01, 'E': 0.002, 'D': 0.045, 'G': -0.093, 'F': 0.057, 'I': 0.347, 'H': -0.165, 'K': -0.063, 'M': -0.163, 'L': 0.246, 'N': -0.151, 'Q': -0.512, 'P': 0.062, 'S': -0.063, 'R': -0.376, 'T': 0.287, 'W': -0.04, 'V': 0.522, 'Y': -0.053}, 2: {'A': 0.021, 'C': -0.17, 'E': -0.08, 'D': -0.079, 'G': -0.057, 'F': -0.638, 'I': -0.02, 'H': 0.009, 'K': 0.326, 'M': 0.029, 'L': 0.027, 'N': 0.11, 'Q': 0.181, 'P': -0.162, 'S': 0.167, 'R': 0.408, 'T': 0.113, 'W': -0.216, 'V': 0.063, 'Y': -0.035}, 3: {'A': 0.013, 'C': 0.001, 'E': 0.001, 'D': -0.0, 'G': 0.002, 'F': 0.001, 'I': 0.002, 'H': -0.005, 'K': -0.0, 'M': -0.003, 'L': 0.001, 'N': -0.003, 'Q': -0.004, 'P': -0.004, 'S': 0.001, 'R': -0.004, 'T': 0.001, 'W': -0.005, 'V': 0.004, 'Y': -0.001}, 4: {'A': 0.002, 'C': 0.001, 'E': 0.001, 'D': 0.001, 'G': -0.0, 'F': 0.001, 'I': -0.0, 'H': -0.001, 'K': -0.002, 'M': -0.001, 'L': -0.0, 'N': -0.0, 'Q': -0.0, 'P': 0.002, 'S': -0.001, 'R': -0.003, 'T': 0.0, 'W': 0.001, 'V': 0.001, 'Y': -0.0}, 5: {'A': -0.005, 'C': 0.001, 'E': 0.001, 'D': -0.001, 'G': 0.001, 'F': 0.006, 'I': 0.005, 'H': -0.003, 'K': -0.008, 'M': 0.003, 'L': 0.006, 'N': 0.001, 'Q': 0.001, 'P': -0.002, 'S': -0.003, 'R': -0.012, 'T': 0.001, 'W': 0.003, 'V': 0.004, 'Y': 0.002}, 6: {'A': 0.099, 'C': -0.004, 'E': 0.061, 'D': 0.045, 'G': 0.039, 'F': -0.148, 'I': 0.123, 'H': -0.136, 'K': -0.079, 'M': 0.071, 'L': 0.031, 'N': 0.013, 'Q': 0.037, 'P': 0.01, 'S': 0.182, 'R': -0.161, 'T': 0.1, 'W': -0.071, 'V': 0.051, 'Y': -0.263}, 7: {'A': 0.003, 'C': 0.0, 'E': 0.0, 'D': 0.001, 'G': -0.0, 'F': -0.002, 'I': -0.003, 'H': -0.001, 'K': -0.0, 'M': -0.002, 'L': -0.004, 'N': 0.0, 'Q': -0.0, 'P': 0.007, 'S': 0.0, 'R': 0.001, 'T': -0.0, 'W': -0.001, 'V': -0.001, 'Y': 
-0.0}, 8: {'A': 0.037, 'C': 0.009, 'E': 0.008, 'D': 0.013, 'G': 0.015, 'F': 0.044, 'I': -0.197, 'H': 0.031, 'K': -0.011, 'M': -0.067, 'L': -0.054, 'N': 0.018, 'Q': -0.002, 'P': 0.024, 'S': 0.022, 'R': 0.023, 'T': 0.036, 'W': 0.007, 'V': 0.011, 'Y': 0.033}, -1: {'con': 1.95721}}
| 2,274
| 2,274
| 0.387423
| 557
| 2,274
| 1.576302
| 0.217235
| 0.127563
| 0.017084
| 0.020501
| 0.249431
| 0.121868
| 0.121868
| 0.121868
| 0.075171
| 0.075171
| 0
| 0.365071
| 0.164028
| 2,274
| 1
| 2,274
| 2,274
| 0.096791
| 0
| 0
| 0
| 0
| 0
| 0.08044
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5da95843c397ef3076ed187cd900723658a522d5
| 356
|
py
|
Python
|
snips_nlu/dataset/__init__.py
|
CharlyBlavier/snips-nlu-Copy
|
829d513ac464e0421a264fd64d8b94f59a09875e
|
[
"Apache-2.0"
] | 3,764
|
2018-02-27T08:25:52.000Z
|
2022-03-30T17:59:22.000Z
|
snips_nlu/dataset/__init__.py
|
unicorns18/snips-nlu
|
74b2893c91fc0bafc919a7e088ecb0b2bd611acf
|
[
"Apache-2.0"
] | 305
|
2018-02-28T13:45:23.000Z
|
2022-03-10T15:33:35.000Z
|
snips_nlu/dataset/__init__.py
|
unicorns18/snips-nlu
|
74b2893c91fc0bafc919a7e088ecb0b2bd611acf
|
[
"Apache-2.0"
] | 559
|
2018-03-04T15:44:15.000Z
|
2022-03-21T17:00:21.000Z
|
from snips_nlu.dataset.dataset import Dataset
from snips_nlu.dataset.entity import Entity
from snips_nlu.dataset.intent import Intent
from snips_nlu.dataset.utils import (
extract_intent_entities, extract_utterance_entities,
get_dataset_gazetteer_entities, get_text_from_chunks)
from snips_nlu.dataset.validation import validate_and_format_dataset
| 44.5
| 68
| 0.867978
| 51
| 356
| 5.705882
| 0.372549
| 0.154639
| 0.206186
| 0.32646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089888
| 356
| 7
| 69
| 50.857143
| 0.898148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.714286
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5db22e0ef48431fd0568b3008330713069706db0
| 101
|
py
|
Python
|
terrascript/azurerm/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 4
|
2022-02-07T21:08:14.000Z
|
2022-03-03T04:41:28.000Z
|
terrascript/azurerm/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/azurerm/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T01:49:42.000Z
|
2022-02-08T14:15:00.000Z
|
# terrascript/azurerm/__init__.py
import terrascript
class azurerm(terrascript.Provider):
    """Terraform ``azurerm`` provider block; all behavior is inherited."""
    pass
| 14.428571
| 36
| 0.792079
| 11
| 101
| 6.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 6
| 37
| 16.833333
| 0.863636
| 0.306931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5d4282ec26713564e7e4edbc3eff8c76ee845196
| 285
|
py
|
Python
|
meracanapi/apitelemac/telemac/exceptions.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
meracanapi/apitelemac/telemac/exceptions.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
meracanapi/apitelemac/telemac/exceptions.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
"""
Telemac-Mascaret exceptions
"""
class TelemacException(Exception):
    """Base exception class for all Telemac-Mascaret errors."""
    pass
class MascaretException(TelemacException):
    """Exception class for Mascaret-specific errors.

    (Docstring corrected: it previously duplicated the base class's text.)
    """
    pass
| 21.923077
| 74
| 0.733333
| 29
| 285
| 7.206897
| 0.413793
| 0.215311
| 0.358852
| 0.229665
| 0.555024
| 0.555024
| 0.555024
| 0.555024
| 0.555024
| 0.555024
| 0
| 0
| 0.17193
| 285
| 12
| 75
| 23.75
| 0.885593
| 0.54386
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
5d5be4202288e9e45b788058d3104ddc1aee63b2
| 2,818
|
py
|
Python
|
scripts/modules/functions/restraints.py
|
andrrizzi/tfep-revisited-2021
|
9a9aff61286be3111c4e70136620d0e3aac31318
|
[
"MIT"
] | 7
|
2021-07-22T00:53:37.000Z
|
2022-03-11T07:29:36.000Z
|
scripts/modules/functions/restraints.py
|
andrrizzi/tfep-revisited-2021
|
9a9aff61286be3111c4e70136620d0e3aac31318
|
[
"MIT"
] | 2
|
2021-08-24T07:54:55.000Z
|
2021-09-14T08:51:55.000Z
|
scripts/modules/functions/restraints.py
|
andrrizzi/tfep-revisited-2021
|
9a9aff61286be3111c4e70136620d0e3aac31318
|
[
"MIT"
] | 1
|
2021-07-22T00:53:56.000Z
|
2021-07-22T00:53:56.000Z
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Function to compute restraint potentials with PyTorch.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import torch
# =============================================================================
# RESTRAINTS IMPLEMENTED IN PLUMED
# =============================================================================
def _walls_plumed(arg, at, kappa, offset=0.0, exp=2.0, eps=1.0, upper_wall=True):
# We apply the bias only if arg > at - offset.
dist = arg - at + offset
if upper_wall:
dist = torch.nn.functional.relu(dist)
else: # Lower wall
dist = torch.nn.functional.relu(-dist)
return kappa * (dist / eps)**exp
def upper_walls_plumed(*args, **kwargs):
    """A restraint that is zero if the argument is below a certain threshold.

    The restraint potential energy is given by

        kappa * ((arg - at + offset) / eps)**exp

    if arg - at + offset is greater than 0, and 0 otherwise.

    Parameters
    ----------
    arg : torch.Tensor
        A 1D tensor of size N with the input variables for the restraint.
    at : float, torch.Tensor
        The threshold at which (without an offset) the restraint kicks in.
    offset : float, torch.Tensor
        An offset for the argument threshold.
    kappa : float, torch.Tensor
        The restraint force constant.
    exp : float, torch.Tensor
        The exponent of the displacement.
    eps : float, torch.Tensor
        A scaling factor dividing the displacement before exponentiation.

    Returns
    -------
    energy : torch.Tensor
        A 1D tensor of size N with the energy for each arg.
    """
    # Thin forwarding wrapper; passing upper_wall positionally-by-keyword here
    # means a caller supplying upper_wall themselves gets a clear TypeError.
    return _walls_plumed(*args, upper_wall=True, **kwargs)
def lower_walls_plumed(*args, **kwargs):
    """A restraint that is zero if the argument is above a certain threshold.

    The restraint potential energy is given by

        kappa * ((arg - at + offset) / eps)**exp

    if arg - at + offset is less than 0, and 0 otherwise.

    Parameters
    ----------
    arg : torch.Tensor
        A 1D tensor of size N with the input variables for the restraint.
    at : float, torch.Tensor
        The threshold at which (without an offset) the restraint kicks in.
    offset : float, torch.Tensor
        An offset for the argument threshold.
    kappa : float, torch.Tensor
        The restraint force constant.
    exp : float, torch.Tensor
        The exponent of the displacement.
    eps : float, torch.Tensor
        A scaling factor dividing the displacement before exponentiation.

    Returns
    -------
    energy : torch.Tensor
        A 1D tensor of size N with the energy for each arg.
    """
    # Thin forwarding wrapper mirroring upper_walls_plumed.
    return _walls_plumed(*args, upper_wall=False, **kwargs)
| 29.978723
| 81
| 0.533712
| 327
| 2,818
| 4.556575
| 0.275229
| 0.088591
| 0.085906
| 0.07651
| 0.781208
| 0.781208
| 0.781208
| 0.736913
| 0.736913
| 0.736913
| 0
| 0.006404
| 0.224273
| 2,818
| 93
| 82
| 30.301075
| 0.675206
| 0.744145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.083333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53731b2b693939beca72720c89488d1d8cb2b1d1
| 28,274
|
py
|
Python
|
tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py
|
ik9999/optuna
|
c2e528a3f2b8d67075cec117ecc964601f609cc3
|
[
"MIT"
] | null | null | null |
tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py
|
ik9999/optuna
|
c2e528a3f2b8d67075cec117ecc964601f609cc3
|
[
"MIT"
] | null | null | null |
tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py
|
ik9999/optuna
|
c2e528a3f2b8d67075cec117ecc964601f609cc3
|
[
"MIT"
] | null | null | null |
import itertools
import random
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from unittest.mock import patch
from unittest.mock import PropertyMock
import numpy as np
import pytest
import optuna
from optuna.samplers import _tpe
from optuna.samplers import TPESampler
class MockSystemAttr:
    """Minimal stand-in for trial system-attribute storage.

    Records each (key, value) pair in ``self.value``; the trial id passed to
    the setter is deliberately ignored.
    """

    def __init__(self) -> None:
        self.value: Dict[str, dict] = {}

    def set_trial_system_attr(self, _: int, key: str, value: dict) -> None:
        """Store ``value`` under ``key``, discarding the trial id."""
        self.value[key] = value
def test_multi_objective_sample_independent_seed_fix() -> None:
    """Same sampler seed must reproduce the suggestion; a different seed must not."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    random.seed(128)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
    # Prepare a trial and a sample for later checks.
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    # Route all storage and system-attr access through the synthetic history.
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        suggestion = sampler.sample_independent(study, trial, "param-a", dist)

    # A fresh sampler with the same seed must reproduce the value exactly.
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) == suggestion

    # A different seed must change the suggestion.
    sampler = TPESampler(seed=1)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_multi_objective_sample_independent_prior() -> None:
    """The prior settings (consider_prior, prior_weight) must affect sampling."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    random.seed(128)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
    # Prepare a trial and a sample for later checks.
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    # Baseline suggestion with default prior settings.
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        suggestion = sampler.sample_independent(study, trial, "param-a", dist)

    # Disabling the prior (same seed) must change the suggestion.
    sampler = TPESampler(consider_prior=False, seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion

    # Changing the prior weight (same seed) must also change the suggestion.
    sampler = TPESampler(prior_weight=0.5, seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_multi_objective_sample_independent_n_startup_trial() -> None:
    """TPE must delegate to random sampling until n_startup_trials is reached."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    random.seed(128)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(n_startup_trials=16, seed=0)
    attrs = MockSystemAttr()
    # Only 15 completed trials (< n_startup_trials): expect the RandomSampler
    # fallback to be used exactly once.
    with patch.object(
        study._storage, "get_all_trials", return_value=past_trials[:15]
    ), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(
        study._storage, "get_trial", return_value=trial
    ), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2, patch.object(
        optuna.samplers.RandomSampler,
        "sample_independent",
        return_value=1.0,
    ) as sample_method:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        sampler.sample_independent(study, trial, "param-a", dist)
    assert sample_method.call_count == 1

    # With all 16 trials available, TPE samples itself: no random fallback.
    sampler = TPESampler(n_startup_trials=16, seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2, patch.object(
        optuna.samplers.RandomSampler,
        "sample_independent",
        return_value=1.0,
    ) as sample_method:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        sampler.sample_independent(study, trial, "param-a", dist)
    assert sample_method.call_count == 0
def test_multi_objective_sample_independent_misc_arguments() -> None:
    """Changing ``n_ei_candidates``, ``gamma``, or ``weights`` alters the sample.

    A baseline suggestion is drawn with default settings; each variant sampler
    (same seed, same history) must produce a different suggestion.
    """
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    random.seed(128)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]

    # Prepare a trial and a baseline sample for the later comparisons.
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        suggestion = sampler.sample_independent(study, trial, "param-a", dist)

    # Variant 1: different number of EI candidates.
    sampler = TPESampler(n_ei_candidates=13, seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion

    # Variant 2: custom gamma (size of the "below" split).
    sampler = TPESampler(gamma=lambda _: 1, seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion

    # Variant 3: custom (all-zero) weight function.
    sampler = TPESampler(weights=lambda n: np.zeros(n), seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_multi_objective_sample_independent_uniform_distributions() -> None:
    """A suggestion for UniformDistribution(1, 100) lies inside [1, 100)."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    random.seed(128)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
    uni_dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        uniform_suggestion = sampler.sample_independent(study, trial, "param-a", uni_dist)
    assert 1.0 <= uniform_suggestion < 100.0
def test_multi_objective_sample_independent_log_uniform_distributions() -> None:
    """Log-uniform samples stay in range and differ from uniform samples."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    random.seed(128)
    uni_dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    # Baseline: sample from the plain uniform distribution.
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        uniform_suggestion = sampler.sample_independent(study, trial, "param-a", uni_dist)

    # Sampling from log-uniform on the same range must give a different value.
    log_dist = optuna.distributions.LogUniformDistribution(1.0, 100.0)
    past_trials = [
        frozen_trial_factory(i, [random.random(), random.random()], log_dist) for i in range(16)
    ]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        loguniform_suggestion = sampler.sample_independent(study, trial, "param-a", log_dist)
    assert 1.0 <= loguniform_suggestion < 100.0
    assert uniform_suggestion != loguniform_suggestion
# NOTE(review): "disrete" in the test name is a typo for "discrete"; kept so any
# external test selection by name keeps working.
def test_multi_objective_sample_independent_disrete_uniform_distributions() -> None:
    """Samples from a discrete-uniform distribution land on the q=0.1 grid."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    random.seed(128)
    disc_dist = optuna.distributions.DiscreteUniformDistribution(1.0, 100.0, 0.1)

    def value_fn(idx: int) -> float:
        # idx is ignored; values come from the module-level RNG seeded above.
        return int(random.random() * 1000) * 0.1

    past_trials = [
        frozen_trial_factory(
            i, [random.random(), random.random()], dist=disc_dist, value_fn=value_fn
        )
        for i in range(16)
    ]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        discrete_uniform_suggestion = sampler.sample_independent(
            study, trial, "param-a", disc_dist
        )
    assert 1.0 <= discrete_uniform_suggestion <= 100.0
    # Must sit on a multiple of the step 0.1 (up to float rounding error).
    assert abs(int(discrete_uniform_suggestion * 10) - discrete_uniform_suggestion * 10) < 1e-3
def test_multi_objective_sample_independent_categorical_distributions() -> None:
    """Samples are drawn from the specified category set."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    random.seed(128)
    categories = [i * 0.3 + 1.0 for i in range(330)]

    def cat_value_fn(idx: int) -> float:
        # idx is ignored; a random category is picked via the seeded RNG.
        return categories[random.randint(0, len(categories) - 1)]

    cat_dist = optuna.distributions.CategoricalDistribution(categories)
    past_trials = [
        frozen_trial_factory(
            i, [random.random(), random.random()], dist=cat_dist, value_fn=cat_value_fn
        )
        for i in range(16)
    ]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        categorical_suggestion = sampler.sample_independent(study, trial, "param-a", cat_dist)
    assert categorical_suggestion in categories
def test_multi_objective_sample_int_uniform_distributions() -> None:
    """Sampling from an int distribution returns an in-range integer."""
    study = optuna.create_study(directions=["minimize", "maximize"])
    random.seed(128)

    def int_value_fn(idx: int) -> int:
        # idx is ignored; values come from the seeded module-level RNG.
        return random.randint(0, 100)

    int_dist = optuna.distributions.IntUniformDistribution(1, 100)
    past_trials = [
        frozen_trial_factory(
            i, [random.random(), random.random()], dist=int_dist, value_fn=int_value_fn
        )
        for i in range(16)
    ]
    trial = frozen_trial_factory(16, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        int_suggestion = sampler.sample_independent(study, trial, "param-a", int_dist)
    assert 1 <= int_suggestion <= 100
    assert isinstance(int_suggestion, int)
@pytest.mark.parametrize(
    "state",
    [
        (optuna.trial.TrialState.FAIL,),
        (optuna.trial.TrialState.PRUNED,),
        (optuna.trial.TrialState.RUNNING,),
        (optuna.trial.TrialState.WAITING,),
    ],
)
def test_multi_objective_sample_independent_handle_unsuccessful_states(
    state: optuna.trial.TrialState,
) -> None:
    """A history containing unsuccessful trials changes the suggestion.

    Baseline: all 32 past trials complete.  Variant: every other past trial
    carries the parametrized state; the two suggestions must differ.
    """
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    random.seed(128)

    # Prepare the all-successful baseline suggestion.
    past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]
    trial = frozen_trial_factory(32, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        all_success_suggestion = sampler.sample_independent(study, trial, "param-a", dist)

    # Unsuccessful trials must be handled differently.
    state_fn = build_state_fn(state)
    past_trials = [
        frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
        for i in range(32)
    ]
    trial = frozen_trial_factory(32, [0, 0])
    sampler = TPESampler(seed=0)
    attrs = MockSystemAttr()
    with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
        study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
    ), patch.object(study._storage, "get_trial", return_value=trial), patch(
        "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
    ) as mock1, patch(
        "optuna.trial.FrozenTrial.system_attrs",
        new_callable=PropertyMock,
    ) as mock2:
        mock1.return_value = attrs.value
        mock2.return_value = attrs.value
        partial_unsuccessful_suggestion = sampler.sample_independent(study, trial, "param-a", dist)
    assert partial_unsuccessful_suggestion != all_success_suggestion
def test_multi_objective_sample_independent_ignored_states() -> None:
    """FAIL, RUNNING, and WAITING trials are all ignored identically.

    The RNG is reset before each state, so if the sampler ignores each of
    these states the three resulting suggestions must be identical.
    """
    study = optuna.create_study(directions=["minimize", "maximize"])
    dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    suggestions = []
    for state in [
        optuna.trial.TrialState.FAIL,
        optuna.trial.TrialState.RUNNING,
        optuna.trial.TrialState.WAITING,
    ]:
        random.seed(128)  # identical past values for every state
        state_fn = build_state_fn(state)
        past_trials = [
            frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
            for i in range(32)
        ]
        trial = frozen_trial_factory(32, [0, 0])
        sampler = TPESampler(seed=0)
        attrs = MockSystemAttr()
        with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
            study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
        ), patch.object(study._storage, "get_trial", return_value=trial), patch(
            "optuna.trial.Trial.system_attrs", new_callable=PropertyMock
        ) as mock1, patch(
            "optuna.trial.FrozenTrial.system_attrs",
            new_callable=PropertyMock,
        ) as mock2:
            mock1.return_value = attrs.value
            mock2.return_value = attrs.value
            suggestions.append(sampler.sample_independent(study, trial, "param-a", dist))

    assert len(set(suggestions)) == 1
def test_multi_objective_get_observation_pairs() -> None:
    """_get_observation_pairs negates 'maximize' objectives.

    Every trial scores (5.0, 5.0); the second direction is maximize, so pairs
    report it as -5.0.  The boolean third argument — presumably the
    ``multivariate`` flag, TODO confirm against the sampler signature — makes
    an unknown parameter name yield an empty result instead of Nones.
    """

    def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
        trial.suggest_int("x", 5, 5)  # constant parameter
        return 5.0, 5.0

    sampler = TPESampler(seed=0)
    study = optuna.create_study(directions=["minimize", "maximize"], sampler=sampler)
    study.optimize(objective, n_trials=5)
    assert _tpe.sampler._get_observation_pairs(study, ["x"], False) == (
        {"x": [5.0, 5.0, 5.0, 5.0, 5.0]},
        [(-float("inf"), [5.0, -5.0]) for _ in range(5)],
    )
    assert _tpe.sampler._get_observation_pairs(study, ["y"], False) == (
        {"y": [None, None, None, None, None]},
        [(-float("inf"), [5.0, -5.0]) for _ in range(5)],
    )
    assert _tpe.sampler._get_observation_pairs(study, ["x"], True) == (
        {"x": [5.0, 5.0, 5.0, 5.0, 5.0]},
        [(-float("inf"), [5.0, -5.0]) for _ in range(5)],
    )
    assert _tpe.sampler._get_observation_pairs(study, ["y"], True) == ({"y": []}, [])
def test_calculate_nondomination_rank() -> None:
    """Check nondomination ranks on 1-, 2-, and 3-objective point sets."""
    rank = _tpe.sampler._calculate_nondomination_rank

    # Single objective: rank mirrors the sorted order (ties share a rank).
    points = np.asarray([[10], [20], [20], [30]])
    assert list(rank(points)) == [0, 1, 1, 2]

    # Two objectives.
    points = np.asarray([[10, 30], [10, 10], [20, 20], [30, 10], [15, 15]])
    assert list(rank(points)) == [1, 0, 2, 1, 1]

    # Three objectives.
    points = np.asarray([[5, 5, 4], [5, 5, 5], [9, 9, 0], [5, 7, 5], [0, 0, 9], [0, 9, 9]])
    assert list(rank(points)) == [0, 1, 0, 2, 0, 1]
def test_calculate_weights_below_for_multi_objective() -> None:
    """Sanity-check hypervolume-contribution weights of the 'below' trials."""
    calc = _tpe.sampler._calculate_weights_below_for_multi_objective

    # Two samples: the first contributes more hypervolume, so it weighs more.
    weights = calc(
        {"x": [1.0, 2.0, 3.0]},
        [(0, [0.2, 0.5]), (0, [0.9, 0.4]), (0, [1, 1])],
        np.array([0, 1]),
    )
    assert len(weights) == 2
    assert weights[0] > weights[1]
    assert sum(weights) > 0

    # Two equally contributing samples share the same weight.
    weights = calc(
        {"x": [1.0, 2.0, 3.0]},
        [(0, [0.2, 0.8]), (0, [0.8, 0.2]), (0, [1, 1])],
        np.array([0, 1]),
    )
    assert len(weights) == 2
    assert weights[0] == weights[1]
    assert sum(weights) > 0

    # Duplicated samples also share the same weight.
    weights = calc(
        {"x": [1.0, 2.0, 3.0]},
        [(0, [0.2, 0.8]), (0, [0.2, 0.8]), (0, [1, 1])],
        np.array([0, 1]),
    )
    assert len(weights) == 2
    assert weights[0] == weights[1]
    assert sum(weights) > 0

    # Three samples: the balanced point outweighs the two symmetric extremes.
    weights = calc(
        {"x": [1.0, 2.0, 3.0, 4.0]},
        [(0, [0.3, 0.3]), (0, [0.2, 0.8]), (0, [0.8, 0.2]), (0, [1, 1])],
        np.array([0, 1, 2]),
    )
    assert len(weights) == 3
    assert weights[0] > weights[1]
    assert weights[0] > weights[2]
    assert weights[1] == weights[2]
    assert sum(weights) > 0
def test_solve_hssp() -> None:
    """Greedy HSSP achieves at least the (1 - 1/e) approximation guarantee.

    For random 2-D and 3-D point sets, the hypervolume of the greedily chosen
    subset is compared against the exhaustive optimum over all subsets of the
    same size.
    """
    random.seed(128)

    # Two dimensions.
    for i in range(8):
        subset_size = int(random.random() * i) + 1
        test_case = np.asarray([[random.random(), random.random()] for _ in range(8)])
        r = 1.1 * np.max(test_case, axis=0)  # reference point beyond all points
        truth = 0.0
        # Exhaustive optimum (permutations over-count but max() is unaffected).
        for subset in itertools.permutations(test_case, subset_size):
            truth = max(truth, _tpe.sampler._compute_hypervolume(np.asarray(subset), r))
        indices = _tpe.sampler._solve_hssp(test_case, np.arange(len(test_case)), subset_size, r)
        approx = _tpe.sampler._compute_hypervolume(test_case[indices], r)
        assert approx / truth > 0.6321  # 1 - 1/e

    # Three dimensions.
    for i in range(8):
        subset_size = int(random.random() * i) + 1
        test_case = np.asarray(
            [[random.random(), random.random(), random.random()] for _ in range(8)]
        )
        r = 1.1 * np.max(test_case, axis=0)
        truth = 0
        for subset in itertools.permutations(test_case, subset_size):
            truth = max(truth, _tpe.sampler._compute_hypervolume(np.asarray(subset), r))
        indices = _tpe.sampler._solve_hssp(test_case, np.arange(len(test_case)), subset_size, r)
        approx = _tpe.sampler._compute_hypervolume(test_case[indices], r)
        assert approx / truth > 0.6321  # 1 - 1/e
def frozen_trial_factory(
    number: int,
    values: List[float],
    dist: Optional[optuna.distributions.BaseDistribution] = None,
    value_fn: Optional[Callable[[int], Union[int, float]]] = None,
    state_fn: Callable[
        [int], optuna.trial.TrialState
    ] = lambda _: optuna.trial.TrialState.COMPLETE,
) -> optuna.trial.FrozenTrial:
    """Build a FrozenTrial with a single parameter ``param-a``.

    Args:
        number: Trial number, also reused as the trial id.
        values: Multi-objective values of the trial.
        dist: Distribution of ``param-a``; defaults to Uniform(1.0, 100.0).
        value_fn: Maps the trial number to the parameter value; when ``None``
            a random value in [1.0, 100.0) is drawn.
        state_fn: Maps the trial number to the trial state (COMPLETE default).

    Returns:
        The populated ``optuna.trial.FrozenTrial``.
    """
    # Use a None sentinel instead of a shared default distribution instance.
    if dist is None:
        dist = optuna.distributions.UniformDistribution(1.0, 100.0)
    if value_fn is None:
        value = random.random() * 99.0 + 1.0
    else:
        value = value_fn(number)
    trial = optuna.trial.FrozenTrial(
        number=number,
        trial_id=number,
        # Bug fix: honor the caller-supplied state_fn; it was previously
        # accepted but ignored (state hard-coded to COMPLETE), so the
        # unsuccessful-state tests never actually produced non-COMPLETE trials.
        state=state_fn(number),
        value=None,
        datetime_start=None,
        datetime_complete=None,
        params={"param-a": value},
        distributions={"param-a": dist},
        user_attrs={},
        system_attrs={},
        intermediate_values={},
        values=values,
    )
    return trial
def build_state_fn(state: optuna.trial.TrialState) -> Callable[[int], optuna.trial.TrialState]:
    """Return a function alternating COMPLETE (even idx) with ``state`` (odd idx)."""

    def state_fn(idx: int) -> optuna.trial.TrialState:
        return optuna.trial.TrialState.COMPLETE if idx % 2 == 0 else state

    return state_fn
| 42.453453
| 99
| 0.685895
| 3,631
| 28,274
| 5.096943
| 0.064996
| 0.051116
| 0.054466
| 0.078295
| 0.836278
| 0.824337
| 0.809694
| 0.791484
| 0.775274
| 0.75874
| 0
| 0.026529
| 0.190741
| 28,274
| 665
| 100
| 42.517293
| 0.782308
| 0.030735
| 0
| 0.676259
| 0
| 0
| 0.10117
| 0.068311
| 0
| 0
| 0
| 0
| 0.07554
| 1
| 0.043165
| false
| 0
| 0.026978
| 0.007194
| 0.084532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53c2e6915f90d0d58dcf6981fb4e48aac65a2029
| 113
|
py
|
Python
|
xbee/tornado/ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | 65
|
2015-12-06T02:38:28.000Z
|
2017-09-05T16:46:07.000Z
|
xbee/tornado/ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | 44
|
2015-10-23T15:33:54.000Z
|
2017-09-01T06:39:50.000Z
|
xbee/tornado/ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | 43
|
2015-12-15T02:52:21.000Z
|
2017-06-24T17:14:53.000Z
|
from xbee.tornado.base import XBeeBase
import xbee.backend as _xbee
class XBee(_xbee.XBee, XBeeBase):
    """Tornado-flavored XBee device class.

    Combines the frame/protocol implementation from ``xbee.backend`` with the
    Tornado-based ``XBeeBase``; adds no behavior of its own.
    """
    pass
| 16.142857
| 38
| 0.769912
| 17
| 113
| 5
| 0.588235
| 0.188235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159292
| 113
| 6
| 39
| 18.833333
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
53cb68c5ace8dc97c23abeaf69c885638d283f98
| 63,242
|
py
|
Python
|
ModelUncertainty-NYCData/Integer_Order_Models_I1-I2-I3-T1/Model7_Prediction_All.py
|
ehsankharazmi/PINN-COVID
|
b831de387b25a69f95ace8afff1e59967be41e71
|
[
"MIT"
] | 8
|
2021-09-20T16:20:14.000Z
|
2022-03-29T00:32:36.000Z
|
ModelUncertainty-NYCData/Integer_Order_Models_I1-I2-I3-T1/Model7_Prediction_All.py
|
ehsankharazmi/PINN-COVID
|
b831de387b25a69f95ace8afff1e59967be41e71
|
[
"MIT"
] | null | null | null |
ModelUncertainty-NYCData/Integer_Order_Models_I1-I2-I3-T1/Model7_Prediction_All.py
|
ehsankharazmi/PINN-COVID
|
b831de387b25a69f95ace8afff1e59967be41e71
|
[
"MIT"
] | 1
|
2021-11-30T17:20:10.000Z
|
2021-11-30T17:20:10.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 20:46:24 2021
@author: Administrator
"""
import sys
sys.path.insert(0, '../../Utilities/')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas
import math
from math import gamma
from scipy.integrate import odeint
import matplotlib.dates as mdates
import tensorflow as tf
import numpy as np
from numpy import *
# from numpy import matlib as mb
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
import time
from itertools import product, combinations
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import datetime
from pyDOE import lhs
# from scipy.special import gamma
start_time = time.time()
from datetime import datetime
now = datetime.now()
# dt_string = now.strftime("%m-%d-%H-%M")
# dt_string = now.strftime("%m-%d")
dt_string = '03-13'
# Load Data
data_frame = pandas.read_csv('Data/data-by-day.csv')
I_new_star = data_frame['CASE_COUNT'] #T x 1 array
H_new_star = data_frame['HOSPITALIZED_COUNT'] #T x 1 array
D_new_star = data_frame['DEATH_COUNT'] #T x 1 array
#7 days average
I_new_star = I_new_star.rolling(window=7).mean()
H_new_star = H_new_star.rolling(window=7).mean()
D_new_star = D_new_star.rolling(window=7).mean()
I_new_star = I_new_star.to_numpy(dtype=np.float64)
H_new_star = H_new_star.to_numpy(dtype=np.float64)
D_new_star = D_new_star.to_numpy(dtype=np.float64)
I_new_star = I_new_star[6:]
H_new_star = H_new_star[6:]
D_new_star = D_new_star[6:]
I_new_star = I_new_star.reshape([len(I_new_star), 1])
H_new_star = H_new_star.reshape([len(H_new_star), 1])
D_new_star = D_new_star.reshape([len(D_new_star), 1])
I_sum_star = np.cumsum(I_new_star)
H_sum_star = np.cumsum(H_new_star)
D_sum_star = np.cumsum(D_new_star)
I_sum_star = I_sum_star.reshape([len(I_sum_star), 1])
H_sum_star = H_sum_star.reshape([len(H_sum_star), 1])
D_sum_star = D_sum_star.reshape([len(D_sum_star), 1])
t_star = np.arange(len(I_new_star))
t_star = t_star.reshape([len(t_star),1])
N = 8.399e6
first_date = '2020-03-06' #first_date[6:]+'-'+first_date[0:2]+'-'+first_date[3:5]
last_date = '2021-03-11' #last_date[6:]+'-'+last_date[0:2]+'-'+str(int(last_date[3:5])+1)
first_date_pred = '2021-03-09' #last_date[6:]+'-'+last_date[0:2]+'-'+str(int(last_date[3:5])-1)
last_date_pred = '2021-07-01'
date_total = np.arange(first_date, last_date, dtype='datetime64[D]')[:,None]
data_mean = np.arange(first_date, last_date, dtype='datetime64[D]')[:,None]
data_pred = np.arange(first_date_pred, last_date_pred, dtype='datetime64[D]')[:,None]
sf = 1e-4
# load data
BetaI_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/BetaI_pred_mean.txt')
p_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/p_pred_mean.txt')
q_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/q_pred_mean.txt')
t_mean = np.arange(len(BetaI_PINN))
S_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/S_pred_mean.txt')
S_PINN = S_PINN/sf
I_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/I_pred_mean.txt')
I_PINN = I_PINN/sf
J_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/J_pred_mean.txt')
J_PINN = J_PINN/sf
H_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/H_pred_mean.txt')
H_PINN = H_PINN/sf
D_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/D_pred_mean.txt')
D_PINN = D_PINN/sf
R_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/R_pred_mean.txt')
R_PINN = R_PINN/sf
I_sum_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/I_sum_pred_mean.txt')
I_sum_PINN = I_sum_PINN/sf
H_sum_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/H_sum_pred_mean.txt')
H_sum_PINN = H_sum_PINN/sf
I_new_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/I_new_pred_mean.txt')
I_new_PINN = I_new_PINN/sf
H_new_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/H_new_pred_mean.txt')
H_new_PINN = H_new_PINN/sf
D_new_PINN = np.loadtxt('Model7/Train-Results-'+dt_string+'-Average/D_new_pred_mean.txt')
D_new_PINN = D_new_PINN/sf
#%%
# #Interpolations
Beta_interp = scipy.interpolate.interp1d(t_mean.flatten(), BetaI_PINN.flatten(), fill_value="extrapolate")
#%%
######################################################################
################ Predicting by sloving forward problem ###############
######################################################################
#%%
# Initial conditions for the forward ODE solve: warm-start every compartment
# from the second-to-last PINN-reconstructed value so the prediction window
# overlaps the end of the training window.
S_init = float(S_PINN[-2])
I_init = float(I_PINN[-2])
J_init = float(J_PINN[-2])
D_init = float(D_PINN[-2])
H_init = float(H_PINN[-2])
R_init = float(R_PINN[-2])
I_sum_init = float(I_sum_PINN[-2])
# NOTE(review): D_sum_init duplicates D_init and is never used below (U_init
# tracks cumulative D via the D compartment itself) — candidate for removal.
D_sum_init = float(D_PINN[-2])
H_sum_init = float(H_sum_PINN[-2])
U_init = [S_init, I_init, J_init, D_init, H_init, R_init, I_sum_init, H_sum_init]
# Fixed model parameters (rates are per day; see the ODE right-hand sides
# below for how each is used).
eps1 = 0.75  # weight of J in the force of infection
eps2 = 0.0  # weight of H in the force of infection (disabled here)
delta = 0.6  # fraction of new infections entering compartment I (rest go to J)
# NOTE(review): alpha is defined but not referenced by the ODEs below —
# presumably an incubation rate from an earlier model variant; confirm.
alpha = 1.0/5.2
Gamma = 1.0/6.0  # outflow rate of I
gammaA = 1.0/6.0  # outflow rate of J
phiD = 1.0/15.0  # H -> D rate (scaled by q)
phiR = 1.0/7.5  # H -> R rate (scaled by 1-q)
p_mean = p_PINN[-2]  # last trained hospitalization fraction
q_mean = q_PINN[-2]  # last trained in-hospital death fraction
# Prediction time grid, offset by 2 days to match the warm-start index [-2].
t_pred = np.arange(len(t_mean)-2, len(t_mean)+len(data_pred)-2)
t_pred = t_pred.reshape([len(t_pred),1])
#%%
#Piecewise linear vacination
def V_1(Dt):
    """Piecewise-linear vaccination schedule, scenario 1.

    Zero until day 300, then a linear ramp of 600/day reaching the
    30000/day plateau at day 350.
    """
    if Dt <= 300:
        return 0
    return min((Dt - 300) * 600, 30000)
def V_2(Dt):
    """Piecewise-linear vaccination schedule, scenario 2 (double scenario 1).

    Zero until day 300, then a linear ramp of 1200/day reaching the
    60000/day plateau at day 350.
    """
    if Dt <= 300:
        return 0
    return min((Dt - 300) * 1200, 60000)
#ODEs
def ODEs_mean_1(X, t, xi, Pert):
    """RHS of the compartmental ODE system under vaccination schedule V_1.

    Args:
        X: State vector [S, I, J, D, H, R, sumI, sumH]; sumI/sumH accumulate
           new I entries and new hospitalizations.
        t: Time in days (passed to V_1).
        xi: Quadrature/collocation point in [-1, 1].
        Pert: Relative perturbation amplitude applied to the frozen
              transmission rate BetaI_PINN[-1] via the factor (1 + xi*Pert).

    Returns:
        List of time derivatives in the same order as X.
    """
    S, I, J, D, H, R, sumI, sumH = X
    # Susceptibles are depleted by infection and by V_1(t)/N vaccination.
    dSdt = -(BetaI_PINN[-1] * (1+xi*Pert) *(I+eps1*J+eps2*H)/N)*S - V_1(t)/N*S
    # New-infection flux (same expression as the infection term in dSdt).
    delay = (BetaI_PINN[-1] * (1+xi*Pert) *(I+eps1*J+eps2*H)/N)*S
    dIdt = delta*delay - Gamma*I
    dJdt = (1-delta)*delay - gammaA*J
    dDdt = (q_mean*phiD)*H
    dHdt = (p_mean*Gamma)*I - (q_mean*phiD) * H - ((1-q_mean)*phiR) * H
    # Vaccinated susceptibles are moved directly into R.
    dRdt = gammaA*J + ((1-p_mean)*Gamma)*I + ((1-q_mean)*phiR)*H + V_1(t)/N*S
    dsumIdt = delta*delay
    dsumHdt = (p_mean*Gamma)*I
    return [dSdt, dIdt, dJdt, dDdt, dHdt, dRdt, dsumIdt, dsumHdt]
def ODEs_mean_2(X, t, xi, Pert):
    """RHS of the compartmental ODE system under vaccination schedule V_2.

    Identical to ODEs_mean_1 except that the (doubled) vaccination schedule
    V_2 replaces V_1; see ODEs_mean_1 for the argument description.
    """
    S, I, J, D, H, R, sumI, sumH = X
    dSdt = -(BetaI_PINN[-1] * (1+xi*Pert) *(I+eps1*J+eps2*H)/N)*S - V_2(t)/N*S
    # New-infection flux (same expression as the infection term in dSdt).
    delay = (BetaI_PINN[-1] * (1+xi*Pert) *(I+eps1*J+eps2*H)/N)*S
    dIdt = delta*delay - Gamma*I
    dJdt = (1-delta)*delay - gammaA*J
    dDdt = (q_mean*phiD)*H
    dHdt = (p_mean*Gamma)*I - (q_mean*phiD) * H - ((1-q_mean)*phiR) * H
    dRdt = gammaA*J + ((1-p_mean)*Gamma)*I + ((1-q_mean)*phiR)*H + V_2(t)/N*S
    dsumIdt = delta*delay
    dsumHdt = (p_mean*Gamma)*I
    return [dSdt, dIdt, dJdt, dDdt, dHdt, dRdt, dsumIdt, dsumHdt]
#%%
#sample points in [-1, 1] as Guass-Labo
N_sample = 10
[Xi,Weights] = np.polynomial.legendre.leggauss(N_sample)
#Solver
Pert0 = 0.175
Pert1 = 0.35
# BetaI_pred_d0 = []
# BetaI_pred_d1 = []
Sol_S_d0 = []
Sol_I_d0 = []
Sol_J_d0 = []
Sol_D_d0 = []
Sol_H_d0 = []
Sol_R_d0 = []
Sol_newI_d0 = []
Sol_newH_d0 = []
Sol_newD_d0 = []
Sol_S_d1 = []
Sol_I_d1 = []
Sol_J_d1 = []
Sol_D_d1 = []
Sol_H_d1 = []
Sol_R_d1 = []
Sol_newI_d1 = []
Sol_newH_d1 = []
Sol_newD_d1 = []
for n in range(N_sample):
xi = Xi[n].reshape([1,1])
#No Vaccine
Sol_d0 = odeint(ODEs_mean_1, U_init, t_pred.flatten(), args = (xi,Pert0))
Sol_S_n_d0 = Sol_d0[:,0]
Sol_I_n_d0 = Sol_d0[:,1]
Sol_J_n_d0 = Sol_d0[:,2]
Sol_D_n_d0 = Sol_d0[:,3]
Sol_H_n_d0 = Sol_d0[:,4]
Sol_R_n_d0 = Sol_d0[:,5]
Sol_sumI_n_d0 = Sol_d0[:,6]
Sol_sumH_n_d0 = Sol_d0[:,7]
Sol_S_d0.append(Sol_S_n_d0.reshape([len(Sol_S_n_d0),1]))
Sol_I_d0.append(Sol_I_n_d0.reshape([len(Sol_I_n_d0),1]))
Sol_J_d0.append(Sol_J_n_d0.reshape([len(Sol_J_n_d0),1]))
Sol_D_d0.append(Sol_D_n_d0.reshape([len(Sol_D_n_d0),1]))
Sol_H_d0.append(Sol_H_n_d0.reshape([len(Sol_H_n_d0),1]))
Sol_R_d0.append(Sol_R_n_d0.reshape([len(Sol_R_n_d0),1]))
Sol_newI_n_d0 = np.diff(Sol_sumI_n_d0)
Sol_newH_n_d0 = np.diff(Sol_sumH_n_d0)
Sol_newD_n_d0 = np.diff(Sol_D_n_d0)
Sol_newI_d0.append(Sol_newI_n_d0.reshape([len(Sol_newI_n_d0),1]))
Sol_newH_d0.append(Sol_newH_n_d0.reshape([len(Sol_newH_n_d0),1]))
Sol_newD_d0.append(Sol_newD_n_d0.reshape([len(Sol_newD_n_d0),1]))
#With Vaccine
Sol_d1 = odeint(ODEs_mean_1, U_init, t_pred.flatten(), args = (xi,Pert1))
Sol_S_n_d1 = Sol_d1[:,0]
Sol_I_n_d1 = Sol_d1[:,1]
Sol_J_n_d1 = Sol_d1[:,2]
Sol_D_n_d1 = Sol_d1[:,3]
Sol_H_n_d1 = Sol_d1[:,4]
Sol_R_n_d1 = Sol_d1[:,5]
Sol_sumI_n_d1 = Sol_d1[:,6]
Sol_sumH_n_d1 = Sol_d1[:,7]
Sol_S_d1.append(Sol_S_n_d1.reshape([len(Sol_S_n_d1),1]))
Sol_I_d1.append(Sol_I_n_d1.reshape([len(Sol_I_n_d1),1]))
Sol_J_d1.append(Sol_J_n_d1.reshape([len(Sol_J_n_d1),1]))
Sol_D_d1.append(Sol_D_n_d1.reshape([len(Sol_D_n_d1),1]))
Sol_H_d1.append(Sol_H_n_d1.reshape([len(Sol_H_n_d1),1]))
Sol_R_d1.append(Sol_R_n_d1.reshape([len(Sol_R_n_d1),1]))
Sol_newI_n_d1 = np.diff(Sol_sumI_n_d1)
Sol_newH_n_d1 = np.diff(Sol_sumH_n_d1)
Sol_newD_n_d1 = np.diff(Sol_D_n_d1)
Sol_newI_d1.append(Sol_newI_n_d1.reshape([len(Sol_newI_n_d1),1]))
Sol_newH_d1.append(Sol_newH_n_d1.reshape([len(Sol_newH_n_d1),1]))
Sol_newD_d1.append(Sol_newD_n_d1.reshape([len(Sol_newD_n_d1),1]))
Sol_S1_d0 = np.asarray(Sol_S_d0, order= 'F')
Sol_S2_d0 = Sol_S1_d0.reshape([N_sample, len(t_pred)])
Sol_S_d0_mat = np.transpose(Sol_S2_d0)
Sol_S_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_S_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_S_d0_mat, Weights)*0.5))
Sol_I1_d0 = np.asarray(Sol_I_d0, order= 'F')
Sol_I2_d0 = Sol_I1_d0.reshape([N_sample, len(t_pred)])
Sol_I_d0_mat = np.transpose(Sol_I2_d0)
Sol_I_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_I_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_I_d0_mat, Weights)*0.5))
Sol_J1_d0 = np.asarray(Sol_J_d0, order= 'F')
Sol_J2_d0 = Sol_J1_d0.reshape([N_sample, len(t_pred)])
Sol_J_d0_mat = np.transpose(Sol_J2_d0)
Sol_J_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_J_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_J_d0_mat, Weights)*0.5))
Sol_D1_d0 = np.asarray(Sol_D_d0, order= 'F')
Sol_D2_d0 = Sol_D1_d0.reshape([N_sample, len(t_pred)])
Sol_D_d0_mat = np.transpose(Sol_D2_d0)
Sol_D_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_D_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_D_d0_mat, Weights)*0.5))
Sol_H1_d0 = np.asarray(Sol_H_d0, order= 'F')
Sol_H2_d0 = Sol_H1_d0.reshape([N_sample, len(t_pred)])
Sol_H_d0_mat = np.transpose(Sol_H2_d0)
Sol_H_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_H_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_H_d0_mat, Weights)*0.5))
Sol_R1_d0 = np.asarray(Sol_R_d0, order= 'F')
Sol_R2_d0 = Sol_R1_d0.reshape([N_sample, len(t_pred)])
Sol_R_d0_mat = np.transpose(Sol_R2_d0)
Sol_R_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_R_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_R_d0_mat, Weights)*0.5))
Sol_newI1_d0 = np.asarray(Sol_newI_d0, order= 'F')
Sol_newI2_d0 = Sol_newI1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newI_d0_mat = np.transpose(Sol_newI2_d0)
Sol_newI_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_newI_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newI_d0_mat, Weights)*0.5))
Sol_newH1_d0 = np.asarray(Sol_newH_d0, order= 'F')
Sol_newH2_d0 = Sol_newH1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newH_d0_mat = np.transpose(Sol_newH2_d0)
Sol_newH_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_newH_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newH_d0_mat, Weights)*0.5))
Sol_newD1_d0 = np.asarray(Sol_newD_d0, order= 'F')
Sol_newD2_d0 = Sol_newD1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newD_d0_mat = np.transpose(Sol_newD2_d0)
Sol_newD_d0_std_V1 = np.sqrt(np.matmul(np.square(Sol_newD_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newD_d0_mat, Weights)*0.5))
Sol_S1_d1 = np.asarray(Sol_S_d1, order= 'F')
Sol_S2_d1 = Sol_S1_d1.reshape([N_sample, len(t_pred)])
Sol_S_d1_mat = np.transpose(Sol_S2_d1)
Sol_S_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_S_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_S_d1_mat, Weights)*0.5))
Sol_I1_d1 = np.asarray(Sol_I_d1, order= 'F')
Sol_I2_d1 = Sol_I1_d1.reshape([N_sample, len(t_pred)])
Sol_I_d1_mat = np.transpose(Sol_I2_d1)
Sol_I_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_I_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_I_d1_mat, Weights)*0.5))
Sol_J1_d1 = np.asarray(Sol_J_d1, order= 'F')
Sol_J2_d1 = Sol_J1_d1.reshape([N_sample, len(t_pred)])
Sol_J_d1_mat = np.transpose(Sol_J2_d1)
Sol_J_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_J_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_J_d1_mat, Weights)*0.5))
Sol_D1_d1 = np.asarray(Sol_D_d1, order= 'F')
Sol_D2_d1 = Sol_D1_d1.reshape([N_sample, len(t_pred)])
Sol_D_d1_mat = np.transpose(Sol_D2_d1)
Sol_D_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_D_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_D_d1_mat, Weights)*0.5))
Sol_H1_d1 = np.asarray(Sol_H_d1, order= 'F')
Sol_H2_d1 = Sol_H1_d1.reshape([N_sample, len(t_pred)])
Sol_H_d1_mat = np.transpose(Sol_H2_d1)
Sol_H_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_H_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_H_d1_mat, Weights)*0.5))
Sol_R1_d1 = np.asarray(Sol_R_d1, order= 'F')
Sol_R2_d1 = Sol_R1_d1.reshape([N_sample, len(t_pred)])
Sol_R_d1_mat = np.transpose(Sol_R2_d1)
Sol_R_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_R_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_R_d1_mat, Weights)*0.5))
Sol_newI1_d1 = np.asarray(Sol_newI_d1, order= 'F')
Sol_newI2_d1 = Sol_newI1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newI_d1_mat = np.transpose(Sol_newI2_d1)
Sol_newI_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_newI_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newI_d1_mat, Weights)*0.5))
Sol_newH1_d1 = np.asarray(Sol_newH_d1, order= 'F')
Sol_newH2_d1 = Sol_newH1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newH_d1_mat = np.transpose(Sol_newH2_d1)
Sol_newH_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_newH_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newH_d1_mat, Weights)*0.5))
Sol_newD1_d1 = np.asarray(Sol_newD_d1, order= 'F')
Sol_newD2_d1 = Sol_newD1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newD_d1_mat = np.transpose(Sol_newD2_d1)
Sol_newD_d1_std_V1 = np.sqrt(np.matmul(np.square(Sol_newD_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newD_d1_mat, Weights)*0.5))
Sol_mean_V1 = odeint(ODEs_mean_1, U_init, t_pred.flatten(), args = (0,0))
Sol_S_mean_V1 = Sol_mean_V1[:,0]
Sol_I_mean_V1 = Sol_mean_V1[:,1]
Sol_J_mean_V1 = Sol_mean_V1[:,2]
Sol_D_mean_V1 = Sol_mean_V1[:,3]
Sol_H_mean_V1 = Sol_mean_V1[:,4]
Sol_R_mean_V1 = Sol_mean_V1[:,5]
Sol_sumI_mean_V1 = Sol_mean_V1[:,6]
Sol_sumH_mean_V1 = Sol_mean_V1[:,7]
Sol_newI_mean_V1 = np.diff(Sol_sumI_mean_V1)
Sol_newH_mean_V1 = np.diff(Sol_sumH_mean_V1)
Sol_newD_mean_V1 = np.diff(Sol_D_mean_V1)
Diff_newI = I_new_PINN[-1]/Sol_newI_mean_V1[0]
Diff_newH = H_new_PINN[-1]/Sol_newH_mean_V1[0]
Diff_newD = D_new_PINN[-1]/Sol_newD_mean_V1[0]
Sol_newI_mean_V1 = Sol_newI_mean_V1 * Diff_newI
Sol_newH_mean_V1 = Sol_newH_mean_V1 * Diff_newH
Sol_newD_mean_V1 = Sol_newD_mean_V1 * Diff_newD
#%%
# Gauss-Legendre quadrature nodes/weights on [-1, 1] for the random
# perturbation parameter xi.
N_sample = 10
Xi, Weights = np.polynomial.legendre.leggauss(N_sample)
# Solver: perturbation amplitudes for the two uncertainty bands.
Pert0 = 0.175
Pert1 = 0.35
# Per-sample trajectory containers: "_d0" lists collect the Pert0 runs,
# "_d1" lists the Pert1 runs.
(Sol_S_d0, Sol_I_d0, Sol_J_d0, Sol_D_d0, Sol_H_d0,
 Sol_R_d0, Sol_newI_d0, Sol_newH_d0, Sol_newD_d0) = ([] for _ in range(9))
(Sol_S_d1, Sol_I_d1, Sol_J_d1, Sol_D_d1, Sol_H_d1,
 Sol_R_d1, Sol_newI_d1, Sol_newH_d1, Sol_newD_d1) = ([] for _ in range(9))
# Propagate each quadrature node xi through the mean-field ODE system at the
# two perturbation amplitudes (Pert0 -> "_d0" lists, Pert1 -> "_d1" lists),
# collecting every compartment trajectory as a column vector.
for n in range(N_sample):
    xi = Xi[n].reshape([1,1])
    #No Vaccine
    # NOTE(review): the two calls below differ only by amplitude (Pert0 vs
    # Pert1); the "No Vaccine"/"With Vaccine" comments may be stale --
    # confirm against ODEs_mean_2.
    Sol_d0 = odeint(ODEs_mean_2, U_init, t_pred.flatten(), args = (xi,Pert0))
    # Columns: S, I, J, D, H, R, cumulative I, cumulative H.
    Sol_S_n_d0 = Sol_d0[:,0]
    Sol_I_n_d0 = Sol_d0[:,1]
    Sol_J_n_d0 = Sol_d0[:,2]
    Sol_D_n_d0 = Sol_d0[:,3]
    Sol_H_n_d0 = Sol_d0[:,4]
    Sol_R_n_d0 = Sol_d0[:,5]
    Sol_sumI_n_d0 = Sol_d0[:,6]
    Sol_sumH_n_d0 = Sol_d0[:,7]
    Sol_S_d0.append(Sol_S_n_d0.reshape([len(Sol_S_n_d0),1]))
    Sol_I_d0.append(Sol_I_n_d0.reshape([len(Sol_I_n_d0),1]))
    Sol_J_d0.append(Sol_J_n_d0.reshape([len(Sol_J_n_d0),1]))
    Sol_D_d0.append(Sol_D_n_d0.reshape([len(Sol_D_n_d0),1]))
    Sol_H_d0.append(Sol_H_n_d0.reshape([len(Sol_H_n_d0),1]))
    Sol_R_d0.append(Sol_R_n_d0.reshape([len(Sol_R_n_d0),1]))
    # Daily increments (length len(t_pred)-1) from cumulative/death series.
    Sol_newI_n_d0 = np.diff(Sol_sumI_n_d0)
    Sol_newH_n_d0 = np.diff(Sol_sumH_n_d0)
    Sol_newD_n_d0 = np.diff(Sol_D_n_d0)
    Sol_newI_d0.append(Sol_newI_n_d0.reshape([len(Sol_newI_n_d0),1]))
    Sol_newH_d0.append(Sol_newH_n_d0.reshape([len(Sol_newH_n_d0),1]))
    Sol_newD_d0.append(Sol_newD_n_d0.reshape([len(Sol_newD_n_d0),1]))
    #With Vaccine
    # Same sampling at the larger perturbation amplitude Pert1.
    Sol_d1 = odeint(ODEs_mean_2, U_init, t_pred.flatten(), args = (xi,Pert1))
    Sol_S_n_d1 = Sol_d1[:,0]
    Sol_I_n_d1 = Sol_d1[:,1]
    Sol_J_n_d1 = Sol_d1[:,2]
    Sol_D_n_d1 = Sol_d1[:,3]
    Sol_H_n_d1 = Sol_d1[:,4]
    Sol_R_n_d1 = Sol_d1[:,5]
    Sol_sumI_n_d1 = Sol_d1[:,6]
    Sol_sumH_n_d1 = Sol_d1[:,7]
    Sol_S_d1.append(Sol_S_n_d1.reshape([len(Sol_S_n_d1),1]))
    Sol_I_d1.append(Sol_I_n_d1.reshape([len(Sol_I_n_d1),1]))
    Sol_J_d1.append(Sol_J_n_d1.reshape([len(Sol_J_n_d1),1]))
    Sol_D_d1.append(Sol_D_n_d1.reshape([len(Sol_D_n_d1),1]))
    Sol_H_d1.append(Sol_H_n_d1.reshape([len(Sol_H_n_d1),1]))
    Sol_R_d1.append(Sol_R_n_d1.reshape([len(Sol_R_n_d1),1]))
    Sol_newI_n_d1 = np.diff(Sol_sumI_n_d1)
    Sol_newH_n_d1 = np.diff(Sol_sumH_n_d1)
    Sol_newD_n_d1 = np.diff(Sol_D_n_d1)
    Sol_newI_d1.append(Sol_newI_n_d1.reshape([len(Sol_newI_n_d1),1]))
    Sol_newH_d1.append(Sol_newH_n_d1.reshape([len(Sol_newH_n_d1),1]))
    Sol_newD_d1.append(Sol_newD_n_d1.reshape([len(Sol_newD_n_d1),1]))
# ---- Gauss-Legendre statistics, scenario V2 ----
# Same construction as the V1 section above: stack the N_sample trajectories
# into a (len(t_pred) x N_sample) matrix, then
#   std = sqrt(E[f^2] - E[f]^2),  E[.] ~= 0.5 * sum_i w_i f(xi_i).
# d0 (Pert0) bands:
Sol_S1_d0 = np.asarray(Sol_S_d0, order= 'F')
Sol_S2_d0 = Sol_S1_d0.reshape([N_sample, len(t_pred)])
Sol_S_d0_mat = np.transpose(Sol_S2_d0)
Sol_S_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_S_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_S_d0_mat, Weights)*0.5))
Sol_I1_d0 = np.asarray(Sol_I_d0, order= 'F')
Sol_I2_d0 = Sol_I1_d0.reshape([N_sample, len(t_pred)])
Sol_I_d0_mat = np.transpose(Sol_I2_d0)
Sol_I_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_I_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_I_d0_mat, Weights)*0.5))
Sol_J1_d0 = np.asarray(Sol_J_d0, order= 'F')
Sol_J2_d0 = Sol_J1_d0.reshape([N_sample, len(t_pred)])
Sol_J_d0_mat = np.transpose(Sol_J2_d0)
Sol_J_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_J_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_J_d0_mat, Weights)*0.5))
Sol_D1_d0 = np.asarray(Sol_D_d0, order= 'F')
Sol_D2_d0 = Sol_D1_d0.reshape([N_sample, len(t_pred)])
Sol_D_d0_mat = np.transpose(Sol_D2_d0)
Sol_D_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_D_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_D_d0_mat, Weights)*0.5))
Sol_H1_d0 = np.asarray(Sol_H_d0, order= 'F')
Sol_H2_d0 = Sol_H1_d0.reshape([N_sample, len(t_pred)])
Sol_H_d0_mat = np.transpose(Sol_H2_d0)
Sol_H_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_H_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_H_d0_mat, Weights)*0.5))
Sol_R1_d0 = np.asarray(Sol_R_d0, order= 'F')
Sol_R2_d0 = Sol_R1_d0.reshape([N_sample, len(t_pred)])
Sol_R_d0_mat = np.transpose(Sol_R2_d0)
Sol_R_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_R_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_R_d0_mat, Weights)*0.5))
# Daily-increment series are one sample shorter (np.diff).
Sol_newI1_d0 = np.asarray(Sol_newI_d0, order= 'F')
Sol_newI2_d0 = Sol_newI1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newI_d0_mat = np.transpose(Sol_newI2_d0)
Sol_newI_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_newI_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newI_d0_mat, Weights)*0.5))
Sol_newH1_d0 = np.asarray(Sol_newH_d0, order= 'F')
Sol_newH2_d0 = Sol_newH1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newH_d0_mat = np.transpose(Sol_newH2_d0)
Sol_newH_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_newH_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newH_d0_mat, Weights)*0.5))
Sol_newD1_d0 = np.asarray(Sol_newD_d0, order= 'F')
Sol_newD2_d0 = Sol_newD1_d0.reshape([N_sample, len(t_pred)-1])
Sol_newD_d0_mat = np.transpose(Sol_newD2_d0)
Sol_newD_d0_std_V2 = np.sqrt(np.matmul(np.square(Sol_newD_d0_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newD_d0_mat, Weights)*0.5))
# d1 (Pert1) bands:
Sol_S1_d1 = np.asarray(Sol_S_d1, order= 'F')
Sol_S2_d1 = Sol_S1_d1.reshape([N_sample, len(t_pred)])
Sol_S_d1_mat = np.transpose(Sol_S2_d1)
Sol_S_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_S_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_S_d1_mat, Weights)*0.5))
Sol_I1_d1 = np.asarray(Sol_I_d1, order= 'F')
Sol_I2_d1 = Sol_I1_d1.reshape([N_sample, len(t_pred)])
Sol_I_d1_mat = np.transpose(Sol_I2_d1)
Sol_I_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_I_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_I_d1_mat, Weights)*0.5))
Sol_J1_d1 = np.asarray(Sol_J_d1, order= 'F')
Sol_J2_d1 = Sol_J1_d1.reshape([N_sample, len(t_pred)])
Sol_J_d1_mat = np.transpose(Sol_J2_d1)
Sol_J_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_J_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_J_d1_mat, Weights)*0.5))
Sol_D1_d1 = np.asarray(Sol_D_d1, order= 'F')
Sol_D2_d1 = Sol_D1_d1.reshape([N_sample, len(t_pred)])
Sol_D_d1_mat = np.transpose(Sol_D2_d1)
Sol_D_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_D_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_D_d1_mat, Weights)*0.5))
Sol_H1_d1 = np.asarray(Sol_H_d1, order= 'F')
Sol_H2_d1 = Sol_H1_d1.reshape([N_sample, len(t_pred)])
Sol_H_d1_mat = np.transpose(Sol_H2_d1)
Sol_H_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_H_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_H_d1_mat, Weights)*0.5))
Sol_R1_d1 = np.asarray(Sol_R_d1, order= 'F')
Sol_R2_d1 = Sol_R1_d1.reshape([N_sample, len(t_pred)])
Sol_R_d1_mat = np.transpose(Sol_R2_d1)
Sol_R_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_R_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_R_d1_mat, Weights)*0.5))
Sol_newI1_d1 = np.asarray(Sol_newI_d1, order= 'F')
Sol_newI2_d1 = Sol_newI1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newI_d1_mat = np.transpose(Sol_newI2_d1)
Sol_newI_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_newI_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newI_d1_mat, Weights)*0.5))
Sol_newH1_d1 = np.asarray(Sol_newH_d1, order= 'F')
Sol_newH2_d1 = Sol_newH1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newH_d1_mat = np.transpose(Sol_newH2_d1)
Sol_newH_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_newH_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newH_d1_mat, Weights)*0.5))
Sol_newD1_d1 = np.asarray(Sol_newD_d1, order= 'F')
Sol_newD2_d1 = Sol_newD1_d1.reshape([N_sample, len(t_pred)-1])
Sol_newD_d1_mat = np.transpose(Sol_newD2_d1)
Sol_newD_d1_std_V2 = np.sqrt(np.matmul(np.square(Sol_newD_d1_mat), Weights)*0.5-\
np.square(np.matmul(Sol_newD_d1_mat, Weights)*0.5))
# Mean-parameter prediction for scenario V2 (ODEs_mean_2, no perturbation).
Sol_mean_V2 = odeint(ODEs_mean_2, U_init, t_pred.flatten(), args = (0,0))
# Unpack the first eight state columns: S, I, J, D, H, R, cumulative I,
# cumulative H.
(Sol_S_mean_V2, Sol_I_mean_V2, Sol_J_mean_V2, Sol_D_mean_V2,
 Sol_H_mean_V2, Sol_R_mean_V2, Sol_sumI_mean_V2, Sol_sumH_mean_V2) = \
    (Sol_mean_V2[:, col] for col in range(8))
# Daily increments from the cumulative/death trajectories.
Sol_newI_mean_V2 = np.diff(Sol_sumI_mean_V2)
Sol_newH_mean_V2 = np.diff(Sol_sumH_mean_V2)
Sol_newD_mean_V2 = np.diff(Sol_D_mean_V2)
# Rescale so the first predicted daily value matches the last PINN-fitted one.
Diff_newI = I_new_PINN[-1]/Sol_newI_mean_V2[0]
Diff_newH = H_new_PINN[-1]/Sol_newH_mean_V2[0]
Diff_newD = D_new_PINN[-1]/Sol_newD_mean_V2[0]
Sol_newI_mean_V2 = Sol_newI_mean_V2 * Diff_newI
Sol_newH_mean_V2 = Sol_newH_mean_V2 * Diff_newH
Sol_newD_mean_V2 = Sol_newD_mean_V2 * Diff_newD
#%%
######################################################################
######################################################################
############################# Save the results ###############################
######################################################################
######################################################################
#%%
#saver
# Output directory <cwd>/Model7/Prediction-Results-<timestamp>/; keep the
# trailing '/' because every np.savetxt below concatenates the file name
# directly onto save_results_to.
current_directory = os.getcwd()
relative_path = '/Model7/Prediction-Results-'+dt_string+'/'
save_results_to = current_directory + relative_path
# exist_ok=True avoids the check-then-create race of the previous
# os.path.exists() + os.makedirs() pair (and is a no-op if the dir exists).
os.makedirs(save_results_to, exist_ok=True)
# Persist every trajectory as a one-column text file under save_results_to.
# Naming scheme: Sol_<compartment>_<mean|d0_std|d1_std>_<V1|V2>.txt, where
# d0/d1 are the Pert0/Pert1 uncertainty bands.
np.savetxt(save_results_to + 'Sol_S_mean_V1.txt', Sol_S_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_mean_V1.txt', Sol_I_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_mean_V1.txt', Sol_J_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_mean_V1.txt', Sol_D_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_mean_V1.txt', Sol_H_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_mean_V1.txt', Sol_R_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_mean_V1.txt', Sol_newI_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_mean_V1.txt', Sol_newH_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_mean_V1.txt', Sol_newD_mean_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_S_d0_std_V1.txt', Sol_S_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_d0_std_V1.txt', Sol_I_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_d0_std_V1.txt', Sol_J_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_d0_std_V1.txt', Sol_D_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_d0_std_V1.txt', Sol_H_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_d0_std_V1.txt', Sol_R_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_d0_std_V1.txt', Sol_newI_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_d0_std_V1.txt', Sol_newH_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_d0_std_V1.txt', Sol_newD_d0_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_S_d1_std_V1.txt', Sol_S_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_d1_std_V1.txt', Sol_I_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_d1_std_V1.txt', Sol_J_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_d1_std_V1.txt', Sol_D_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_d1_std_V1.txt', Sol_H_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_d1_std_V1.txt', Sol_R_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_d1_std_V1.txt', Sol_newI_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_d1_std_V1.txt', Sol_newH_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_d1_std_V1.txt', Sol_newD_d1_std_V1.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_S_mean_V2.txt', Sol_S_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_mean_V2.txt', Sol_I_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_mean_V2.txt', Sol_J_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_mean_V2.txt', Sol_D_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_mean_V2.txt', Sol_H_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_mean_V2.txt', Sol_R_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_mean_V2.txt', Sol_newI_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_mean_V2.txt', Sol_newH_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_mean_V2.txt', Sol_newD_mean_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_S_d0_std_V2.txt', Sol_S_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_d0_std_V2.txt', Sol_I_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_d0_std_V2.txt', Sol_J_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_d0_std_V2.txt', Sol_D_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_d0_std_V2.txt', Sol_H_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_d0_std_V2.txt', Sol_R_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_d0_std_V2.txt', Sol_newI_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_d0_std_V2.txt', Sol_newH_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_d0_std_V2.txt', Sol_newD_d0_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_S_d1_std_V2.txt', Sol_S_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_I_d1_std_V2.txt', Sol_I_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_J_d1_std_V2.txt', Sol_J_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_D_d1_std_V2.txt', Sol_D_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_H_d1_std_V2.txt', Sol_H_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_R_d1_std_V2.txt', Sol_R_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newI_d1_std_V2.txt', Sol_newI_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newH_d1_std_V2.txt', Sol_newH_d1_std_V2.reshape((-1,1)))
np.savetxt(save_results_to + 'Sol_newD_d1_std_V2.txt', Sol_newD_d1_std_V2.reshape((-1,1)))
#%%
######################################################################
############################# Plotting ###############################
######################################################################
# Base font size for the figures (overridden to 40 just below and inside
# each plotting cell).
plt.rc('font', size=60)
# Month-locator spacing used as intervals[i] by the plot loops below,
# where i is 1 (full view) or 2 (zoom); index 0 is unused.
intervals=[1,2,1]
#%%
plt.rc('font', size=40)
# V1 = data_frame['Daily number of first vaccine doses administered (includes single-dose vaccines)']
# V2 = data_frame['Daily number of second vaccine doses administered (only applicable for two-dose vaccines)']
# V1 = V1[291:].to_numpy(dtype=np.float64)
# V2 = V2[291:].to_numpy(dtype=np.float64)
# V_eff = 0.52*(V1-V2)+0.95*V2
# V_eff = np.convolve(V_eff, np.ones(7), 'valid') / 7
# V_pred_1 = np.asarray([V_1(time) for time in t_pred], dtype=np.float64)
# V_pred_2 = np.asarray([V_2(time) for time in t_pred], dtype=np.float64)
# # plt.plot(V_eff)
# # plt.plot(V1)
# # plt.plot(V2)
# fig, ax = plt.subplots()
# ax.bar(date_total[290:].flatten(), V1[6:], label='dose 1 vacc.')
# ax.bar(date_total[290:].flatten(), V2[6:], label='dose 2 vacc.')
# ax.plot(date_total[290:].flatten(), V_eff, 'k-', lw=5, label='current eff. vacc')
# ax.plot(data_pred.flatten(), V_pred_1, 'r-', lw=5, label='future eff. vacc.')
# ax.plot(data_pred.flatten(), V_pred_2, 'r--', lw=5, label='future eff. vacc. (higher rate)')
# ax.xaxis.set_major_locator(mdates.MonthLocator(interval=2))
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
# ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
# plt.xticks(rotation=30)
# ax.legend(fontsize=35, ncol = 1, loc = 'best')
# ax.tick_params(axis='x', labelsize = 40)
# ax.tick_params(axis='y', labelsize = 40)
# ax.ticklabel_format(axis='y', style='sci', scilimits=(3,3))
# plt.rc('font', size=40)
# # ax.grid(True)
# # ax.set_xlabel('Date', fontsize = font)
# ax.set_ylabel('$vaccination$', fontsize = 80)
# fig.set_size_inches(w=25, h=12.5)
# plt.savefig(save_results_to +'Vaccination.pdf', dpi=300)
# plt.savefig(save_results_to +'Vaccination.png', dpi=300)
#%%
#BetaI curve
# Transmission rate beta_I: PINN-fitted history plus a constant extrapolation
# of its final value, with +/-10% and +/-20% shaded bands.
beta_pred_0 = np.full(data_pred[1:].shape[0], BetaI_PINN[-1])
fig, ax = plt.subplots()
ax.plot(data_mean, BetaI_PINN, 'k-', lw=4, label='PINN-Training')
ax.plot(data_pred[1:].flatten(), beta_pred_0, 'm--', lw=4, label='Prediction-mean')
band_specs = (
    (1.1, 0.9, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
    (1.2, 0.8, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
)
for hi_mult, lo_mult, shade, band_label in band_specs:
    ax.fill_between(data_pred[1:].flatten(),
                    beta_pred_0 * hi_mult,
                    beta_pred_0 * lo_mult,
                    facecolor=shade, interpolate=True, label=band_label)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=2))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
plt.xticks(rotation=30)
ax.tick_params(axis='x', labelsize=40)
ax.tick_params(axis='y', labelsize=40)
plt.rc('font', size=40)
ax.grid(True)
ax.set_ylabel(r'$\beta_{I}$', fontsize=80)
fig.set_size_inches(w=25, h=12.5)
for ext in ('pdf', 'png'):
    plt.savefig(save_results_to + 'BetaI.' + ext, dpi=300)
#%%
#New infectious
# Daily new cases: V1/V2 scenario means with quadrature uncertainty bands;
# i==1 is the full view with data overlays, i==2 uses denser tick spacing.
for i in [1, 2]:
    fig, ax = plt.subplots()
    x_days = data_pred[:-1].flatten()
    band_specs = (
        (Sol_newI_mean_V1, Sol_newI_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_newI_mean_V1, Sol_newI_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_newI_mean_V2, Sol_newI_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_newI_mean_V2, Sol_newI_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    )
    for mean_curve, std_curve, shade, band_label in band_specs:
        ax.fill_between(x_days,
                        mean_curve.flatten() - std_curve.flatten(),
                        mean_curve.flatten() + std_curve.flatten(),
                        facecolor=shade, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7
    ax.plot(data_pred[:-1], Sol_newI_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred[:-1], Sol_newI_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        ax.plot(date_total, I_new_star, 'ro', lw=4, markersize=8, label='Data-7davg')
        ax.plot(data_mean[1:], I_new_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    ax.set_ylabel('daily cases', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    stem = 'new_cases_7davg' if i == 1 else 'new_cases_7davg_zoom'
    plt.savefig(save_results_to + stem + '.pdf', dpi=300)
    plt.savefig(save_results_to + stem + '.png', dpi=300)
#%%
#New hospitalized
# Daily new hospitalizations: same layout as the daily-cases figure above.
for i in [1, 2]:
    fig, ax = plt.subplots()
    x_days = data_pred[:-1].flatten()
    band_specs = (
        (Sol_newH_mean_V1, Sol_newH_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_newH_mean_V1, Sol_newH_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_newH_mean_V2, Sol_newH_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_newH_mean_V2, Sol_newH_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    )
    for mean_curve, std_curve, shade, band_label in band_specs:
        ax.fill_between(x_days,
                        mean_curve.flatten() - std_curve.flatten(),
                        mean_curve.flatten() + std_curve.flatten(),
                        facecolor=shade, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7
    ax.plot(data_pred[:-1], Sol_newH_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred[:-1], Sol_newH_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        ax.plot(date_total, H_new_star, 'ro', lw=4, markersize=8, label='Data-7davg')
        ax.plot(data_mean[1:], H_new_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    ax.set_ylabel('daily hospitalized cases', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    stem = 'new_hospitalized_7davg' if i == 1 else 'new_hospitalized_7davg_zoom'
    plt.savefig(save_results_to + stem + '.pdf', dpi=300)
    plt.savefig(save_results_to + stem + '.png', dpi=300)
#%%
#New death
# Daily new deaths: same layout as the daily-cases figure above.
for i in [1, 2]:
    fig, ax = plt.subplots()
    x_days = data_pred[:-1].flatten()
    band_specs = (
        (Sol_newD_mean_V1, Sol_newD_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_newD_mean_V1, Sol_newD_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_newD_mean_V2, Sol_newD_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_newD_mean_V2, Sol_newD_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    )
    for mean_curve, std_curve, shade, band_label in band_specs:
        ax.fill_between(x_days,
                        mean_curve.flatten() - std_curve.flatten(),
                        mean_curve.flatten() + std_curve.flatten(),
                        facecolor=shade, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7
    ax.plot(data_pred[:-1], Sol_newD_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred[:-1], Sol_newD_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        ax.plot(date_total, D_new_star, 'ro', lw=4, markersize=8, label='Data-7davg')
        ax.plot(data_mean[1:], D_new_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    ax.set_ylabel('daily death cases', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    stem = 'new_death_7davg' if i == 1 else 'new_death_7davg_zoom'
    plt.savefig(save_results_to + stem + '.pdf', dpi=300)
    plt.savefig(save_results_to + stem + '.png', dpi=300)
#%%
#Current Suspectious
# Current susceptible compartment S(t): PINN fit plus V1/V2 scenario means
# with quadrature uncertainty bands (i==1 full view, i==2 denser ticks).
# (File names keep the original "Suspectious" spelling for compatibility.)
for i in [1, 2]:
    fig, ax = plt.subplots()
    # V1 bands at the two perturbation amplitudes.
    ax.fill_between(data_pred.flatten(),
                    Sol_S_mean_V1.flatten() - Sol_S_d0_std_V1.flatten(),
                    Sol_S_mean_V1.flatten() + Sol_S_d0_std_V1.flatten(),
                    facecolor=(0.1, 0.2, 0.5, 0.3), interpolate=True,
                    label='Prediction-std-(10%)')
    ax.fill_between(data_pred.flatten(),
                    Sol_S_mean_V1.flatten() - Sol_S_d1_std_V1.flatten(),
                    Sol_S_mean_V1.flatten() + Sol_S_d1_std_V1.flatten(),
                    facecolor=(0.1, 0.5, 0.8, 0.3), interpolate=True,
                    label='Prediction-std-(20%)')
    # V2 bands, unlabeled so the legend is not duplicated.
    ax.fill_between(data_pred.flatten(),
                    Sol_S_mean_V2.flatten() - Sol_S_d0_std_V2.flatten(),
                    Sol_S_mean_V2.flatten() + Sol_S_d0_std_V2.flatten(),
                    facecolor=(0.6, 0.2, 0.5, 0.3), interpolate=True)
    ax.fill_between(data_pred.flatten(),
                    Sol_S_mean_V2.flatten() - Sol_S_d1_std_V2.flatten(),
                    Sol_S_mean_V2.flatten() + Sol_S_d1_std_V2.flatten(),
                    facecolor=(0.6, 0.5, 0.8, 0.3), interpolate=True)
    if i == 1:
        ax.plot(data_pred, Sol_S_mean_V1, 'm--', lw=4, label='Prediction-mean')
        ax.plot(data_pred, Sol_S_mean_V2, 'g--', lw=4, label='Prediction-mean (higher rate vacc.)')
        ax.plot(data_mean, S_PINN, 'k-', lw=4, label='PINN-Training')
    if i == 2:
        ax.plot(data_pred, Sol_S_mean_V1, 'm--', lw=7, label='Prediction-mean')
        ax.plot(data_pred, Sol_S_mean_V2, 'g--', lw=7, label='Prediction-mean (higher rate vacc.)')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' in '$\mathbb{S}$' was an invalid escape sequence
    # (SyntaxWarning on modern Python); the rendered label is unchanged.
    ax.set_ylabel(r'$\mathbb{S}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    if i == 1:
        plt.savefig(save_results_to + 'Current_Suspectious.pdf', dpi=300)
        plt.savefig(save_results_to + 'Current_Suspectious.png', dpi=300)
    if i == 2:
        plt.savefig(save_results_to + 'Current_Suspectious_zoom.pdf', dpi=300)
        plt.savefig(save_results_to + 'Current_Suspectious_zoom.png', dpi=300)
#%%
# #Current Exposed
# for i in [1,2]:
# fig, ax = plt.subplots()
# plt.fill_between(data_pred.flatten(), \
# Sol_E_mean_V1.flatten()-Sol_E_d0_std_V1.flatten(), \
# Sol_E_mean_V1.flatten()+Sol_E_d0_std_V1.flatten(), \
# facecolor=(0.1,0.2,0.5,0.3), interpolate=True, label='Prediction-std-(10%)')
# plt.fill_between(data_pred.flatten(), \
# Sol_E_mean_V1.flatten()-Sol_E_d1_std_V1.flatten(), \
# Sol_E_mean_V1.flatten()+Sol_E_d1_std_V1.flatten(), \
# facecolor=(0.1,0.5,0.8,0.3), interpolate=True, label='Prediction-std-(20%)')
# plt.fill_between(data_pred.flatten(), \
# Sol_E_mean_V2.flatten()-Sol_E_d0_std_V2.flatten(), \
# Sol_E_mean_V2.flatten()+Sol_E_d0_std_V2.flatten(), \
# facecolor=(0.6,0.2,0.5,0.3), interpolate=True)#, label='Prediction-std-(10%)')
# plt.fill_between(data_pred.flatten(), \
# Sol_E_mean_V2.flatten()-Sol_E_d1_std_V2.flatten(), \
# Sol_E_mean_V2.flatten()+Sol_E_d1_std_V2.flatten(), \
# facecolor=(0.6,0.5,0.8,0.3), interpolate=True)#, label='Prediction-std-(20%)')
# if i==1:
# ax.plot(data_pred, Sol_E_mean_V1, 'm--', lw=4, label='Prediction-mean')
# ax.plot(data_pred, Sol_E_mean_V2, 'g--', lw=4, label='Prediction-mean (higher rate vacc.)')
# ax.plot(data_mean, E_PINN, 'k-', lw=4, label='PINN-Training')
# if i==2:
# ax.plot(data_pred, Sol_E_mean_V1, 'm--', lw=7, label='Prediction-mean')
# ax.plot(data_pred, Sol_E_mean_V2, 'g--', lw=7, label='Prediction-mean (higher rate vacc.)')
# # ax.set_xlim(200,300)
# # ax.set_ylim(0-0.5,6000+0.5)
# ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
# ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
# plt.xticks(rotation=30)
# ax.legend(fontsize=35, ncol = 1, loc = 'best')
# ax.tick_params(axis='x', labelsize = 40)
# ax.tick_params(axis='y', labelsize = 40)
# # ax.ticklabel_format(axis='y', style='sci', scilimits=(3,3))
# plt.rc('font', size=40)
# ax.grid(True)
# # ax.set_xlabel('Date', fontsize = font)
# ax.set_ylabel('$\mathbb{E}$', fontsize = 40)
# fig.set_size_inches(w=25, h=12.5)
# if i==1:
# plt.savefig(save_results_to + 'Current_Exposed.pdf', dpi=300)
# plt.savefig(save_results_to + 'Current_Exposed.png', dpi=300)
# if i==2:
# plt.savefig(save_results_to + 'Current_Exposed_zoom.pdf', dpi=300)
# plt.savefig(save_results_to + 'Current_Exposed_zoom.png', dpi=300)
#%%
#Current infectious
# Current infectious compartment I(t): PINN fit plus V1/V2 scenario means
# with quadrature uncertainty bands (i==1 full view, i==2 denser ticks).
for i in [1, 2]:
    fig, ax = plt.subplots()
    # V1 bands at the two perturbation amplitudes.
    ax.fill_between(data_pred.flatten(),
                    Sol_I_mean_V1.flatten() - Sol_I_d0_std_V1.flatten(),
                    Sol_I_mean_V1.flatten() + Sol_I_d0_std_V1.flatten(),
                    facecolor=(0.1, 0.2, 0.5, 0.3), interpolate=True,
                    label='Prediction-std-(10%)')
    ax.fill_between(data_pred.flatten(),
                    Sol_I_mean_V1.flatten() - Sol_I_d1_std_V1.flatten(),
                    Sol_I_mean_V1.flatten() + Sol_I_d1_std_V1.flatten(),
                    facecolor=(0.1, 0.5, 0.8, 0.3), interpolate=True,
                    label='Prediction-std-(20%)')
    # V2 bands, unlabeled so the legend is not duplicated.
    ax.fill_between(data_pred.flatten(),
                    Sol_I_mean_V2.flatten() - Sol_I_d0_std_V2.flatten(),
                    Sol_I_mean_V2.flatten() + Sol_I_d0_std_V2.flatten(),
                    facecolor=(0.6, 0.2, 0.5, 0.3), interpolate=True)
    ax.fill_between(data_pred.flatten(),
                    Sol_I_mean_V2.flatten() - Sol_I_d1_std_V2.flatten(),
                    Sol_I_mean_V2.flatten() + Sol_I_d1_std_V2.flatten(),
                    facecolor=(0.6, 0.5, 0.8, 0.3), interpolate=True)
    if i == 1:
        ax.plot(data_pred, Sol_I_mean_V1, 'm--', lw=4, label='Prediction-mean')
        ax.plot(data_pred, Sol_I_mean_V2, 'g--', lw=4, label='Prediction-mean (higher rate vacc.)')
        ax.plot(data_mean, I_PINN, 'k-', lw=4, label='PINN-Training')
    if i == 2:
        ax.plot(data_pred, Sol_I_mean_V1, 'm--', lw=7, label='Prediction-mean')
        ax.plot(data_pred, Sol_I_mean_V2, 'g--', lw=7, label='Prediction-mean (higher rate vacc.)')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' in '$\mathbb{I}$' was an invalid escape sequence
    # (SyntaxWarning on modern Python); the rendered label is unchanged.
    ax.set_ylabel(r'$\mathbb{I}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    if i == 1:
        plt.savefig(save_results_to + 'Current_Infectious.pdf', dpi=300)
        plt.savefig(save_results_to + 'Current_Infectious.png', dpi=300)
    if i == 2:
        plt.savefig(save_results_to + 'Current_Infectious_zoom.pdf', dpi=300)
        plt.savefig(save_results_to + 'Current_Infectious_zoom.png', dpi=300)
#%%
#Current asymptomatic
# Current asymptomatic: predicted mean +/- std bands for the two vaccination
# scenarios (V1 baseline, V2 higher rate), drawn twice: full range (i == 1,
# with the PINN training fit) and zoomed (i == 2, thicker lines).
for i in [1, 2]:
    fig, ax = plt.subplots()
    # (mean, std, band colour RGBA, legend label) for each uncertainty band;
    # d0/d1 are the 10% / 20% perturbation levels.  V2 bands are unlabeled
    # (as in the original figure) to keep the legend compact.
    bands = [
        (Sol_J_mean_V1, Sol_J_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_J_mean_V1, Sol_J_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_J_mean_V2, Sol_J_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_J_mean_V2, Sol_J_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    ]
    for mean, std, colour, band_label in bands:
        plt.fill_between(data_pred.flatten(),
                         mean.flatten() - std.flatten(),
                         mean.flatten() + std.flatten(),
                         facecolor=colour, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7  # thicker curves on the zoomed figure
    ax.plot(data_pred, Sol_J_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred, Sol_J_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        # Training fit is only shown on the full-range figure.
        ax.plot(data_mean, J_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' is an invalid escape sequence in a plain literal.
    ax.set_ylabel(r'$\mathbb{J}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    suffix = '' if i == 1 else '_zoom'
    for ext in ('pdf', 'png'):
        plt.savefig(save_results_to + 'Current_Asymptomatic' + suffix + '.' + ext, dpi=300)
#%%
#Current death
# Current deaths: predicted mean +/- std bands for the two vaccination
# scenarios (V1 baseline, V2 higher rate), drawn twice: full range (i == 1,
# with the PINN training fit) and zoomed (i == 2, thicker lines).
for i in [1, 2]:
    fig, ax = plt.subplots()
    # (mean, std, band colour RGBA, legend label); d0/d1 are the 10% / 20%
    # perturbation levels.  V2 bands are unlabeled, as in the original figure.
    bands = [
        (Sol_D_mean_V1, Sol_D_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_D_mean_V1, Sol_D_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_D_mean_V2, Sol_D_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_D_mean_V2, Sol_D_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    ]
    for mean, std, colour, band_label in bands:
        plt.fill_between(data_pred.flatten(),
                         mean.flatten() - std.flatten(),
                         mean.flatten() + std.flatten(),
                         facecolor=colour, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7  # thicker curves on the zoomed figure
    ax.plot(data_pred, Sol_D_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred, Sol_D_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        # Training fit is only shown on the full-range figure.
        ax.plot(data_mean, D_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' is an invalid escape sequence in a plain literal.
    ax.set_ylabel(r'$\mathbb{D}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    suffix = '' if i == 1 else '_zoom'
    for ext in ('pdf', 'png'):
        plt.savefig(save_results_to + 'Current_Death' + suffix + '.' + ext, dpi=300)
#%%
#Current hospitalized
# Current hospitalized: predicted mean +/- std bands for the two vaccination
# scenarios (V1 baseline, V2 higher rate), drawn twice: full range (i == 1,
# with the PINN training fit) and zoomed (i == 2, thicker lines).
for i in [1, 2]:
    fig, ax = plt.subplots()
    # (mean, std, band colour RGBA, legend label); d0/d1 are the 10% / 20%
    # perturbation levels.  V2 bands are unlabeled, as in the original figure.
    bands = [
        (Sol_H_mean_V1, Sol_H_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_H_mean_V1, Sol_H_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_H_mean_V2, Sol_H_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_H_mean_V2, Sol_H_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    ]
    for mean, std, colour, band_label in bands:
        plt.fill_between(data_pred.flatten(),
                         mean.flatten() - std.flatten(),
                         mean.flatten() + std.flatten(),
                         facecolor=colour, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7  # thicker curves on the zoomed figure
    ax.plot(data_pred, Sol_H_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred, Sol_H_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        # Training fit is only shown on the full-range figure.
        ax.plot(data_mean, H_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' is an invalid escape sequence in a plain literal.
    ax.set_ylabel(r'$\mathbb{H}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    suffix = '' if i == 1 else '_zoom'
    for ext in ('pdf', 'png'):
        plt.savefig(save_results_to + 'Current_Hospitalized' + suffix + '.' + ext, dpi=300)
#%%
#Current removed
# Current removed/recovered: predicted mean +/- std bands for the two
# vaccination scenarios (V1 baseline, V2 higher rate), drawn twice: full
# range (i == 1, with the PINN training fit) and zoomed (i == 2).
for i in [1, 2]:
    fig, ax = plt.subplots()
    # (mean, std, band colour RGBA, legend label); d0/d1 are the 10% / 20%
    # perturbation levels.  V2 bands are unlabeled, as in the original figure.
    bands = [
        (Sol_R_mean_V1, Sol_R_d0_std_V1, (0.1, 0.2, 0.5, 0.3), 'Prediction-std-(10%)'),
        (Sol_R_mean_V1, Sol_R_d1_std_V1, (0.1, 0.5, 0.8, 0.3), 'Prediction-std-(20%)'),
        (Sol_R_mean_V2, Sol_R_d0_std_V2, (0.6, 0.2, 0.5, 0.3), None),
        (Sol_R_mean_V2, Sol_R_d1_std_V2, (0.6, 0.5, 0.8, 0.3), None),
    ]
    for mean, std, colour, band_label in bands:
        plt.fill_between(data_pred.flatten(),
                         mean.flatten() - std.flatten(),
                         mean.flatten() + std.flatten(),
                         facecolor=colour, interpolate=True, label=band_label)
    lw = 4 if i == 1 else 7  # thicker curves on the zoomed figure
    ax.plot(data_pred, Sol_R_mean_V1, 'm--', lw=lw, label='Prediction-mean')
    ax.plot(data_pred, Sol_R_mean_V2, 'g--', lw=lw, label='Prediction-mean (higher rate vacc.)')
    if i == 1:
        # Training fit is only shown on the full-range figure.
        ax.plot(data_mean, R_PINN, 'k-', lw=4, label='PINN-Training')
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=intervals[i]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%y'))
    ax.xaxis.set_minor_locator(mdates.DayLocator(interval=7))
    plt.xticks(rotation=30)
    ax.legend(fontsize=35, ncol=1, loc='best')
    ax.tick_params(axis='x', labelsize=40)
    ax.tick_params(axis='y', labelsize=40)
    plt.rc('font', size=40)
    ax.grid(True)
    # Raw string: '\m' is an invalid escape sequence in a plain literal.
    ax.set_ylabel(r'$\mathbb{R}$', fontsize=40)
    fig.set_size_inches(w=25, h=12.5)
    suffix = '' if i == 1 else '_zoom'
    for ext in ('pdf', 'png'):
        plt.savefig(save_results_to + 'Current_Recovered' + suffix + '.' + ext, dpi=300)
| 49.177294
| 112
| 0.633914
| 10,792
| 63,242
| 3.365827
| 0.035119
| 0.044048
| 0.036147
| 0.023786
| 0.905627
| 0.876638
| 0.858964
| 0.853238
| 0.847346
| 0.833691
| 0
| 0.061234
| 0.193811
| 63,242
| 1,285
| 113
| 49.215564
| 0.651211
| 0.124901
| 0
| 0.548857
| 0
| 0
| 0.089276
| 0.0321
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004158
| false
| 0
| 0.022869
| 0
| 0.035343
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53cc51a93ef82b14a7ec16ec867df498664a7acf
| 7,468
|
py
|
Python
|
AP_SS16/504/python/plot_helpers.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | 3
|
2016-04-27T17:07:00.000Z
|
2022-02-02T15:43:15.000Z
|
AP_SS16/504/python/plot_helpers.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | 5
|
2016-04-27T17:10:03.000Z
|
2017-06-20T14:54:20.000Z
|
AP_SS16/504/python/plot_helpers.py
|
DimensionalScoop/kautschuk
|
90403f97cd60b9716cb6a06668196891d5d96578
|
[
"MIT"
] | null | null | null |
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import uncertainties
import uncertainties.unumpy as unp
from scipy.constants import C2K, K2C
from scipy.optimize import curve_fit
from uncertainties import ufloat
from uncertainties.unumpy import uarray
def extract_error(data):
    """Split *data* into nominal values and standard deviations.

    Returns ``(nominal, error)``.  When the elements of *data* do not
    carry uncertainties, *data* is returned unchanged and ``error`` is
    ``None``.
    """
    if not isinstance(data[0], uncertainties.UFloat):
        return data, None
    return unp.nominal_values(data), unp.std_devs(data)
def autolimits(data, err=None):
    """Return ``[lo, hi]`` axis limits for *data*.

    The limits are padded by 2.5% of the data span, plus half of the
    largest error bar when *err* is given.
    """
    lo, hi = min(data), max(data)
    padding = (hi - lo) * 0.025
    if err is not None:
        padding += max(err) / 2
    return [lo - padding, hi + padding]
def plot(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements (red dots) against a continuous fit curve.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    # y_error is currently unused (error bars are disabled in this helper).
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.plot(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.plot(x_messung, y_messung, 'r.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.grid()
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
def plot2(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements (blue dots) against a continuous fit
    curve, with axis limits derived from the data.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.plot(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.plot(x_messung, y_messung, 'b.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.xlim(x_limit)
    plt.ylim(autolimits(y_messung, err=y_error))
    # Single explicit grid(True): the original called plt.grid() twice,
    # and a no-argument grid() call toggles visibility, so the second
    # call switched the grid straight back off.
    plt.grid(True)
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
def plot3(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements (green dots) against a continuous fit curve.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    # y_error is currently unused (error bars are disabled in this helper).
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.plot(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.plot(x_messung, y_messung, 'g.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.grid()
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
def plot4(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements (magenta dots) against a continuous fit curve.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    # y_error is currently unused (error bars are disabled in this helper).
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.plot(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.plot(x_messung, y_messung, 'm.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.grid()
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
def plot5(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements (yellow dots) against a continuous fit curve.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    # y_error is currently unused (error bars are disabled in this helper).
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.plot(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.plot(x_messung, y_messung, 'y.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.grid()
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
def log_plot(x_messung, y_messung, xlabel, ylabel, filename, theorie):
    """Plot discrete measurements against a continuous fit curve on
    double-logarithmic axes.

    Args:
        x_messung (uarray): x values, optionally with uncertainties.
        y_messung (uarray): y values, optionally with uncertainties.
        xlabel (string): x axis label.
        ylabel (string): y axis label.
        filename (string): output file name, written below ``../plots/``.
        theorie (func(x)): theory function mapping x values to y values;
            pass ``None`` to skip the fit curve.

    Returns:
        None
    """
    x_messung, x_error = extract_error(x_messung)
    # y_error is currently unused (error bars are disabled in this helper).
    y_messung, y_error = extract_error(y_messung)
    x_limit = autolimits(x_messung, err=x_error)
    # NOTE(review): x_flow is linearly spaced; on a log axis the sample
    # density is uneven — confirm this is intended.
    x_flow = np.linspace(*x_limit, num=1000)
    if theorie is not None:
        plt.loglog(x_flow, theorie(x_flow), 'g-', label="Fit")
    plt.loglog(x_messung, y_messung, 'm.', label="Messwerte")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='best')
    plt.grid()
    plt.tight_layout(pad=0, h_pad=1.18, w_pad=1.18)
    plt.savefig('../plots/' + filename)
| 26.960289
| 93
| 0.647161
| 1,058
| 7,468
| 4.386578
| 0.10397
| 0.084465
| 0.096962
| 0.082741
| 0.868563
| 0.862745
| 0.862745
| 0.862745
| 0.855419
| 0.855419
| 0
| 0.013458
| 0.223889
| 7,468
| 276
| 94
| 27.057971
| 0.787267
| 0.330209
| 0
| 0.661017
| 0
| 0
| 0.037006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.084746
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53d0b94c773f39a17acc3303f5a33b0271b90900
| 15,439
|
py
|
Python
|
ckanext/notify/actions.py
|
andela/ckanext-notify
|
1ce3c136c933850133a3c1715bb8c8a8ec81019e
|
[
"MIT"
] | null | null | null |
ckanext/notify/actions.py
|
andela/ckanext-notify
|
1ce3c136c933850133a3c1715bb8c8a8ec81019e
|
[
"MIT"
] | 2
|
2017-10-17T09:07:21.000Z
|
2017-10-27T08:34:52.000Z
|
ckanext/notify/actions.py
|
andela/ckanext-notify
|
1ce3c136c933850133a3c1715bb8c8a8ec81019e
|
[
"MIT"
] | 2
|
2017-11-07T12:22:25.000Z
|
2017-11-10T07:22:06.000Z
|
import ckan.plugins as plugins
import ckan.logic as logic
import constants
import validator
import db
# Shorthand aliases for frequently used CKAN plugin-toolkit objects.
toolkit = plugins.toolkit
# Template context object exposed by the toolkit; not referenced in the
# actions below — presumably kept for callers elsewhere (TODO confirm).
c = toolkit.c
# Alias so this module's callers can reference ValidationError directly.
ValidationError = logic.ValidationError
def _dictize_slack_details(slack_details):
    # Map a slack-details DB record onto a plain dict.
    fields = ('id', 'webhook_url', 'slack_channel', 'organization_id')
    return {field: getattr(slack_details, field) for field in fields}
def _undictize_slack_basic(slack_details, data_dict):
    # Copy the user-supplied slack values onto the DB record.
    for field in ('webhook_url', 'slack_channel', 'organization_id'):
        setattr(slack_details, field, data_dict[field])
def _dictize_email_details(email_details):
    # Map an email-details DB record onto a plain dict.
    fields = ('id', 'email', 'organization_id')
    return {field: getattr(email_details, field) for field in fields}
def _undictize_email_basic(email_details, data_dict):
    # Copy the user-supplied email values onto the DB record.
    for field in ('email', 'organization_id'):
        setattr(email_details, field, data_dict[field])
def datarequest_register_slack(context, data_dict):
    '''
    Register a slack channel for notifications.

    The user's access rights are checked before the channel is created
    (NotAuthorized on failure) and the supplied parameters are validated
    (ValidationError on failure).

    :param context: the context of the request
    :type context: dict

    :param data_dict: must contain ``webhook_url``, ``slack_channel`` and
        ``organization_id``
    :type data_dict: dict

    :returns: a dict with the slack details (id, webhook_url,
        slack_channel, organization_id)
    :rtype: dict
    '''
    session = context['session']

    # Make sure the backing tables exist
    db.init_db(context['model'])

    # Authorisation first: nothing is written for unauthorised users
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Raises ValidationError on malformed input
    validator.validate_slack_form(context, data_dict)

    # Persist the new channel
    record = db.Org_Slack_Details()
    _undictize_slack_basic(record, data_dict)
    session.add(record)
    session.commit()

    return _dictize_slack_details(record)
def slack_channels_show(context, data_dict):
    '''
    List the slack notification channels of an organization.

    The organization name must be supplied as ``organization_id``; access
    rights are checked before the information is returned (NotAuthorized
    is raised otherwise).

    :param context: the context of the request
    :type context: dict

    :param data_dict: must contain ``organization_id``; may contain a
        truthy ``success`` flag
    :type data_dict: dict

    :returns: a list of slack notification details (id, organization_id,
        webhook_url, slack_channel)
    :rtype: list
    '''
    organization_id = data_dict.get('organization_id')
    success = data_dict.get('success', False)

    if not (organization_id or success):
        raise toolkit.ValidationError(toolkit._('Organization ID has not been included'))

    # Make sure the backing tables exist
    db.init_db(context['model'])

    # NOTE(review): the access check is skipped when 'success' is set —
    # confirm this is intentional for the post-submit redirect flow.
    if not success:
        toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    rows = db.Org_Slack_Details.get(organization_id=organization_id)
    return [_dictize_slack_details(row) for row in rows] if rows else []
def slack_channel_show(context, data_dict):
    '''
    Action to retrieve the information of a slack notification channel.
    The only required parameter is the id of the channel. A NotFound
    exception will be risen if the given id is not found.

    Access rights will be checked before returning the information and an
    exception will be risen (NotAuthorized) if the user is not authorized.

    :param context: the context of the request
    :type context: dict

    :param data_dict: Contains the following
        id: The id of the slack notification channel to be shown
    :type data_dict: dict

    :returns: A dict with the slack details (id, webhook_url,
        slack_channel, organization_id)
    :rtype: dict
    '''
    model = context['model']
    id = data_dict.get('id', '')

    if not id:
        raise toolkit.ValidationError(toolkit._('Channel ID has not been included'))

    # Init the data base
    db.init_db(model)

    # Check access
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Get the slack channel
    result = db.Org_Slack_Details.get(id=id)
    if not result:
        # 'database' (was 'data base') for consistency with the other actions
        raise toolkit.ObjectNotFound(toolkit._('Channel {0} not found in the database').format(id))

    slack_data = result[0]
    data_dict = _dictize_slack_details(slack_data)

    return data_dict
def slack_channel_update(context, data_dict):
    '''
    Update an existing slack notification channel.

    Access rights are checked (NotAuthorized on failure) and the supplied
    values are validated (ValidationError on failure) before the record
    is modified.

    :param context: the context of the request
    :type context: dict

    :param data_dict: must contain ``id`` (channel to update),
        ``webhook_url``, ``slack_channel`` and ``organization_id``
    :type data_dict: dict

    :returns: a dict with the updated slack details (id, webhook_url,
        slack_channel, organization_id)
    :rtype: dict
    '''
    session = context['session']

    channel_id = data_dict.get('id', '')
    if not channel_id:
        raise toolkit.ValidationError(toolkit._('Slack Channel ID has not been included'))

    # Make sure the backing tables exist
    db.init_db(context['model'])

    # Authorisation first: nothing is written for unauthorised users
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Fetch the existing record
    result = db.Org_Slack_Details.get(id=channel_id)
    if not result:
        raise toolkit.ObjectNotFound(toolkit._('Channel {0} not found in the database').format(channel_id))
    slack_details = result[0]

    # Raises ValidationError on malformed input
    validator.validate_slack_form(context, data_dict)

    # Apply the user-supplied values and persist them
    _undictize_slack_basic(slack_details, data_dict)
    session.add(slack_details)
    session.commit()

    return _dictize_slack_details(slack_details)
def slack_channel_delete(context, data_dict):
    '''
    Delete a slack notification channel.

    The user's access rights are checked before the record is removed; a
    NotAuthorized exception is raised for unauthorised users.

    :param context: the context of the request
    :type context: dict

    :param data_dict: must contain ``id``, the channel to delete
    :type data_dict: dict
    '''
    session = context['session']

    channel_id = data_dict.get('id', '')
    if not channel_id:
        raise toolkit.ValidationError(toolkit._('Slack Channel ID has not been included'))

    # Make sure the backing tables exist
    db.init_db(context['model'])

    # Authorisation first: nothing is deleted for unauthorised users
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Fetch and remove the record
    result = db.Org_Slack_Details.get(id=channel_id)
    if not result:
        raise toolkit.ObjectNotFound(toolkit._('Channel {0} not found in the database').format(channel_id))

    session.delete(result[0])
    session.commit()
def datarequest_register_email(context, data_dict):
    '''
    Action to register the organization email address used for notifications.
    The function checks the access rights of the user before creating the
    record. If the user is not allowed a NotAuthorized exception will be risen.

    In addition, you should note that the parameters will be checked and an
    exception (ValidationError) will be risen if some of these parameters are
    not valid.

    NOTE(review): the previous docstring described data-request creation
    (title/description); the parameters below reflect what the code reads.

    :param context: the context of the request
    :type context: dict

    :param data_dict: Contains the following:
        email: The email address to receive notifications
        organization_id: The ID of the organization
    :type data_dict: dict

    :returns: A dict with the email details (id, email, organization_id)
    :rtype: dict
    '''
    model = context['model']
    session = context['session']

    # Init the data base
    db.init_db(model)

    # Check access before any write happens
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Validate data (raises ValidationError on malformed input)
    validator.validate_email_form(context, data_dict)

    # Store the data
    email_details = db.Org_Email_Details()
    _undictize_email_basic(email_details, data_dict)
    session.add(email_details)
    session.commit()

    return _dictize_email_details(email_details)
def email_channel_show(context, data_dict):
    '''
    Action to retrieve the information of an email notification channel.
    The only required parameter is the id of the channel. A NotFound
    exception will be risen if the given id is not found.

    Access rights will be checked before returning the information and an
    exception will be risen (NotAuthorized) if the user is not authorized.

    :param context: the context of the request
    :type context: dict

    :param data_dict: Contains the following
        id: The id of the email notification channel to be shown
    :type data_dict: dict

    :returns: A dict with the email details (id, email, organization_id)
    :rtype: dict
    '''
    model = context['model']
    id = data_dict.get('id', '')

    if not id:
        raise toolkit.ValidationError(toolkit._('Email ID has not been included'))

    # Init the data base
    db.init_db(model)

    # Check access
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Get the email channel
    result = db.Org_Email_Details.get(id=id)
    if not result:
        # Translate first, then format: passing a pre-formatted string to
        # toolkit._() defeats the message-catalogue lookup (matches the
        # slack_* actions). Also 'database' for message consistency.
        raise toolkit.ObjectNotFound(toolkit._('Email {0} not found in the database').format(id))

    email_data = result[0]
    data_dict = _dictize_email_details(email_data)

    return data_dict
def email_channels_show(context, data_dict):
    '''
    List the email notification channels of an organization.

    The organization name must be supplied as ``organization_id``; access
    rights are checked before the information is returned (NotAuthorized
    is raised otherwise).

    :param context: the context of the request
    :type context: dict

    :param data_dict: must contain ``organization_id``
    :type data_dict: dict

    :returns: a list of email notification details (id, organization_id,
        email)
    :rtype: list
    '''
    organization_id = data_dict.get('organization_id', '')
    if not organization_id:
        raise toolkit.ValidationError(toolkit._('Organization ID has not been included'))

    # Make sure the backing tables exist
    db.init_db(context['model'])

    # Authorisation first
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    rows = db.Org_Email_Details.get(organization_id=organization_id)
    return [_dictize_email_details(row) for row in rows] if rows else []
def email_channel_update(context, data_dict):
    '''
    Action to update an email notification channel. The only required
    parameter is the id of the channel. The function checks the access
    rights of the user before updating the channel. If the user is not
    allowed a NotAuthorized exception will be risen.

    In addition, you should note that the parameters will be checked and an
    exception (ValidationError) will be risen if some of these parameters
    are invalid.

    :param context: the context of the request
    :type context: dict

    :param data_dict: Contains the following:
        id: The ID of the email notification channel to be updated
        email: The email address of the organization
        organization_id: The ID of the organization whose channel is to be updated
    :type data_dict: dict

    :returns: A dict with the email details (id, email, organization_id)
    :rtype: dict
    '''
    model = context['model']
    session = context['session']
    id = data_dict.get('id', '')

    if not id:
        raise toolkit.ValidationError(toolkit._('Email Notification ID has not been included'))

    # Init the data base
    db.init_db(model)

    # Check access
    toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)

    # Get the initial data
    result = db.Org_Email_Details.get(id=id)
    if not result:
        # Translate first, then format: passing a pre-formatted string to
        # toolkit._() defeats the message-catalogue lookup (matches the
        # slack_* actions).
        raise toolkit.ObjectNotFound(toolkit._('Email {0} not found in the database').format(id))

    email_data = result[0]

    # Validate data
    validator.validate_email_form(context, data_dict)

    # Set the data provided by the user in the data_dict
    _undictize_email_basic(email_data, data_dict)
    session.add(email_data)
    session.commit()

    return _dictize_email_details(email_data)
def email_channel_delete(context, data_dict):
'''
Action to delete an email notification channel. The function checks the access rights
of the user before deleting the data request. If the user is not allowed
a NotAuthorized exception will be risen.
:param context: the context of the request
:type data_dict: dict
:param data_dict: Contains the following
id: The id of the slack notification channel to delete
:type data_dict: dict
'''
model = context['model']
session = context['session']
id = data_dict.get('id', '')
# Check id
if not id:
raise toolkit.ValidationError(toolkit._('Email ID has not been included'))
# Init the data base
db.init_db(model)
# Check access
toolkit.check_access(constants.MANAGE_NOTIFICATIONS, context, data_dict)
# Get the slack channel
result = db.Org_Email_Details.get(id=id)
if not result:
raise toolkit.ObjectNotFound(toolkit._('Email {0} not found in the database'.format(id)))
email_data = result[0]
session.delete(email_data)
session.commit()
| 32.989316
| 100
| 0.716303
| 2,140
| 15,439
| 5.013551
| 0.070561
| 0.057414
| 0.033554
| 0.014913
| 0.877342
| 0.845745
| 0.831485
| 0.805294
| 0.763724
| 0.74033
| 0
| 0.000991
| 0.215299
| 15,439
| 467
| 101
| 33.059957
| 0.884606
| 0.476002
| 0
| 0.613095
| 0
| 0
| 0.103514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.029762
| 0
| 0.172619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9905ff6c7182f40a93d2f7f1d27ce2968636f617
| 965
|
py
|
Python
|
alpa/collective/__init__.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 114
|
2022-03-02T20:38:16.000Z
|
2022-03-31T20:41:50.000Z
|
alpa/collective/__init__.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 6
|
2022-03-09T22:04:50.000Z
|
2022-03-30T17:53:15.000Z
|
alpa/collective/__init__.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 5
|
2022-03-05T12:04:31.000Z
|
2022-03-31T03:55:42.000Z
|
from alpa.collective.collective import (
nccl_available, gloo_available, is_group_initialized, init_collective_group,
destroy_collective_group, create_collective_group, get_rank,
get_collective_group_size, allreduce, allreduce_multigpu, barrier, reduce,
reduce_multigpu, broadcast, broadcast_partialgpu, broadcast_multigpu, allgather,
allgather_multigpu, reducescatter, reducescatter_multigpu, send,
send_multigpu, recv, recv_multigpu, check_and_get_group)
__all__ = [
"nccl_available", "gloo_available", "is_group_initialized",
"init_collective_group", "destroy_collective_group",
"create_collective_group", "get_rank", "get_collective_group_size",
"allreduce", "allreduce_multigpu", "barrier", "reduce", "reduce_multigpu",
"broadcast", "broadcast_multigpu", "allgather", "allgather_multigpu",
"reducescatter", "reducescatter_multigpu", "send", "send_multigpu", "recv",
"recv_multigpu", "check_and_get_group"
]
| 53.611111
| 84
| 0.780311
| 104
| 965
| 6.730769
| 0.278846
| 0.171429
| 0.048571
| 0.074286
| 0.932857
| 0.932857
| 0.932857
| 0.932857
| 0.932857
| 0.932857
| 0
| 0
| 0.11399
| 965
| 17
| 85
| 56.764706
| 0.818713
| 0
| 0
| 0
| 0
| 0
| 0.358549
| 0.119171
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54e41b146aef92a9f935343ae7509ac96e08e701
| 107
|
py
|
Python
|
src/pipedown/nodes/filters/__init__.py
|
brendanhasz/drainpype
|
a183acec7cae1ef9fde260868e2b021516a8cd7f
|
[
"MIT"
] | 2
|
2021-03-03T12:11:24.000Z
|
2021-03-18T15:09:52.000Z
|
src/pipedown/nodes/filters/__init__.py
|
brendanhasz/pipedown
|
a183acec7cae1ef9fde260868e2b021516a8cd7f
|
[
"MIT"
] | null | null | null |
src/pipedown/nodes/filters/__init__.py
|
brendanhasz/pipedown
|
a183acec7cae1ef9fde260868e2b021516a8cd7f
|
[
"MIT"
] | null | null | null |
from .collate import Collate
from .feature_filter import FeatureFilter
from .item_filter import ItemFilter
| 26.75
| 41
| 0.859813
| 14
| 107
| 6.428571
| 0.571429
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11215
| 107
| 3
| 42
| 35.666667
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54e6ae2d3d0305bb0098fdd116a6cf65079cb120
| 8,729
|
py
|
Python
|
tests/data/azure/compute.py
|
Cloudanix/cartography
|
653d3cccbb9318e876fd558d386593e3612f4f78
|
[
"Apache-2.0"
] | null | null | null |
tests/data/azure/compute.py
|
Cloudanix/cartography
|
653d3cccbb9318e876fd558d386593e3612f4f78
|
[
"Apache-2.0"
] | 11
|
2020-12-21T02:51:11.000Z
|
2022-03-15T14:30:43.000Z
|
tests/data/azure/compute.py
|
Cloudanix/cartography
|
653d3cccbb9318e876fd558d386593e3612f4f78
|
[
"Apache-2.0"
] | 1
|
2021-02-05T08:08:47.000Z
|
2021-02-05T08:08:47.000Z
|
DESCRIBE_VMS = [
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM",
"type": "Microsoft.Compute/virtualMachines",
"location": "West US",
"resource_group": "TestRG",
"name": "TestVM",
"plan": {
"product": "Standard",
},
"handware_profile": {
"vm_size": "Standard_D2s_v3",
},
"license_type": "Windows_Client ",
"os_profile": {
"computer_name": "TestVM",
},
"identity": {
"type": "SystemAssigned",
},
"zones": [
"West US 2",
],
"additional_capabilities": {
"ultra_ssd_enabled": True,
},
"priority": "Low",
"eviction_policy": "Deallocate",
},
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM1",
"type": "Microsoft.Compute/virtualMachines",
"location": "West US",
"resource_group": "TestRG",
"name": "TestVM1",
"plan": {
"product": "Standard",
},
"handware_profile": {
"vm_size": "Standard_D2s_v3",
},
"license_type": "Windows_Client ",
"os_profile": {
"computer_name": "TestVM1",
},
"identity": {
"type": "SystemAssigned",
},
"zones": [
"West US 2",
],
"additional_capabilities": {
"ultra_ssd_enabled": True,
},
"priority": "Low",
"eviction_policy": "Deallocate",
},
]
DESCRIBE_VM_DATA_DISKS = [
{
"lun": 0,
"name": "dd0",
"create_option": "Empty",
"caching": "ReadWrite",
"managed_disk": {
"storage_account_type": "Premium_LRS",
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/disks/dd0",
},
"disk_size_gb": 30,
},
{
"lun": 0,
"name": "dd1",
"create_option": "Empty",
"caching": "ReadWrite",
"managed_disk": {
"storage_account_type": "Premium_LRS",
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/disks/dd1",
},
"disk_size_gb": 30,
},
]
DESCRIBE_DISKS = [
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/disks/dd0",
"type": "Microsoft.Compute/disks",
"location": "West US",
"resource_group": "TestRG",
"name": "dd0",
"creation_data": {
"create_option": "Attach",
},
"disk_size_gb": 100,
"encryption_settings_collection": {
"enabled": True,
},
"max_shares": 10,
"network_access_policy": "AllowAll",
"os_type": "Windows",
"tier": "P4",
"sku": {
"name": "Standard_LRS",
},
"zones": [
"West US 2",
],
},
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/disks/dd1",
"type": "Microsoft.Compute/disks",
"location": "West US",
"resource_group": "TestRG",
"name": "dd1",
"creation_data": {
"create_option": "Attach",
},
"disk_size_gb": 100,
"encryption_settings_collection": {
"enabled": True,
},
"max_shares": 10,
"network_access_policy": "AllowAll",
"os_type": "Windows",
"tier": "P4",
"sku": {
"name": "Standard_LRS",
},
"zones": [
"West US 2",
],
},
]
DESCRIBE_SNAPSHOTS = [
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/snapshots/ss0",
"type": "Microsoft.Compute/snapshots",
"location": "West US",
"resource_group": "TestRG",
"name": "ss0",
"creation_data": {
"create_option": "Attach",
},
"disk_size_gb": 100,
"encryption_settings_collection": {
"enabled": True,
},
"incremental": True,
"network_access_policy": "AllowAll",
"os_type": "Windows",
"tier": "P4",
"sku": {
"name": "Standard_LRS",
},
},
{
"id": "/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/snapshots/ss1",
"type": "Microsoft.Compute/snapshots",
"location": "West US",
"resource_group": "TestRG",
"name": "ss1",
"creation_data": {
"create_option": "Attach",
},
"disk_size_gb": 100,
"encryption_settings_collection": {
"enabled": True,
},
"incremental": True,
"network_access_policy": "AllowAll",
"os_type": "Windows",
"tier": "P4",
"sku": {
"name": "Standard_LRS",
},
},
]
DESCRIBE_VMEXTENSIONS = [
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachines/TestVM/extensions/extensions1",
"type":
"Microsoft.Compute/virtualMachines/extensions",
"resource_group":
"TestRG",
"name":
"extensions1",
"location": "West US",
"vm_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM",
},
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachines/TestVM1/extensions/extensions2",
"type":
"Microsoft.Compute/virtualMachines/extensions",
"resource_group":
"TestRG",
"name":
"extensions2",
"location": "West US",
"vm_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM1",
},
]
DESCRIBE_VMAVAILABLESIZES = [
{
"numberOfCores":
2,
"type":
"Microsoft.Compute/virtualMachines/availablesizes",
"osDiskSizeInMB":
1234,
"name":
"size1",
"resourceDiskSizeInMB":
2312,
"memoryInMB":
4352,
"maxDataDiskCount":
3214,
"vm_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM",
},
{
"numberOfCores":
2,
"type":
"Microsoft.Compute/virtualMachines/availablesizes",
"osDiskSizeInMB":
1234,
"name":
"size2",
"resourceDiskSizeInMB":
2312,
"memoryInMB":
4352,
"maxDataDiskCount":
3214,
"vm_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/virtualMachines/TestVM1",
},
]
DESCRIBE_VMSCALESETS = [
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set1",
"type":
"Microsoft.Compute/virtualMachineScaleSets",
"resource_group":
"TestRG",
"name":
"set1",
"location": "West US",
},
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set2",
"type":
"Microsoft.Compute/virtualMachineScaleSets",
"resource_group":
"TestRG",
"name":
"set2",
"location": "West US",
},
]
DESCRIBE_VMSCALESETEXTENSIONS = [
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set1/extensions/extension1",
"type":
"Microsoft.Compute/virtualMachineScaleSets/extensions",
"resource_group":
"TestRG",
"name":
"extension1",
"set_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set1",
},
{
"id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set2/extensions/extension2",
"type":
"Microsoft.Compute/virtualMachineScaleSets/extensions",
"resource_group":
"TestRG",
"name":
"extension2",
"set_id":
"/subscriptions/00-00-00-00/resourceGroups/TestRG/providers/Microsoft.Compute/\
virtualMachineScaleSets/set2",
},
]
| 28.249191
| 117
| 0.531447
| 708
| 8,729
| 6.399718
| 0.158192
| 0.052968
| 0.052968
| 0.083867
| 0.912823
| 0.912823
| 0.912823
| 0.912823
| 0.88369
| 0.820128
| 0
| 0.044258
| 0.316646
| 8,729
| 308
| 118
| 28.340909
| 0.715339
| 0
| 0
| 0.686667
| 0
| 0.066667
| 0.432237
| 0.218467
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54efb69123c9bd7cf2adce5a5b991ca4b28bc374
| 211
|
py
|
Python
|
traceback_with_variables/activate_in_ipython_by_import.py
|
dominicj-nylas/traceback_with_variables
|
899565d52f89587a29f14745ef5820b05bda8187
|
[
"MIT"
] | null | null | null |
traceback_with_variables/activate_in_ipython_by_import.py
|
dominicj-nylas/traceback_with_variables
|
899565d52f89587a29f14745ef5820b05bda8187
|
[
"MIT"
] | null | null | null |
traceback_with_variables/activate_in_ipython_by_import.py
|
dominicj-nylas/traceback_with_variables
|
899565d52f89587a29f14745ef5820b05bda8187
|
[
"MIT"
] | null | null | null |
"""
For the simplest usage possible. Just import it
"""
from traceback_with_variables.override import override_print_tb, ColorSchemes
override_print_tb(ipython=True, color_scheme=ColorSchemes.common)
| 23.444444
| 78
| 0.796209
| 27
| 211
| 5.962963
| 0.777778
| 0.161491
| 0.186335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132701
| 211
| 8
| 79
| 26.375
| 0.879781
| 0.222749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
54f67f48877b302d5e82bbda6fee3630712d997f
| 36
|
py
|
Python
|
boa3_test/test_sc/list_test/StrList.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/list_test/StrList.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/list_test/StrList.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
def Main():
a = ['1', '2', '3']
| 12
| 23
| 0.305556
| 6
| 36
| 1.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 0.305556
| 36
| 2
| 24
| 18
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54fcd5815cc89227d0ee1d7ba779b7d58def1e39
| 322
|
py
|
Python
|
src/python/WMComponent/DBS3Buffer/Oracle/DBSBufferFiles/LoadBulkFilesByID.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMComponent/DBS3Buffer/Oracle/DBSBufferFiles/LoadBulkFilesByID.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMComponent/DBS3Buffer/Oracle/DBSBufferFiles/LoadBulkFilesByID.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_LoadBulkFilesByID_
Oracle implementation of DBSBufferFiles.LoadBulkFilesByID
"""
from WMComponent.DBS3Buffer.MySQL.DBSBufferFiles.LoadBulkFilesByID import LoadBulkFilesByID as MySQLLoadBulkFilesByID
class LoadBulkFilesByID(MySQLLoadBulkFilesByID):
"""
Same as MySQL
"""
pass
| 20.125
| 117
| 0.78882
| 27
| 322
| 9.333333
| 0.703704
| 0.246032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003584
| 0.13354
| 322
| 15
| 118
| 21.466667
| 0.899642
| 0.350932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0714d9eabb009b0b37899ca80e1f97cd28c4b79c
| 44
|
py
|
Python
|
falcon_casbin/__init__.py
|
alexferl/falcon-casbin
|
9bebee7ab31c3d274c5466d14d3db9ab786d5b85
|
[
"MIT"
] | null | null | null |
falcon_casbin/__init__.py
|
alexferl/falcon-casbin
|
9bebee7ab31c3d274c5466d14d3db9ab786d5b85
|
[
"MIT"
] | null | null | null |
falcon_casbin/__init__.py
|
alexferl/falcon-casbin
|
9bebee7ab31c3d274c5466d14d3db9ab786d5b85
|
[
"MIT"
] | null | null | null |
from .falcon_casbin import CasbinMiddleware
| 22
| 43
| 0.886364
| 5
| 44
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
071eb267263df64cc6626036f3ea3a55a3ab317a
| 1,962
|
py
|
Python
|
metadata-ingestion/tests/unit/test_snowflake_source.py
|
LucaBassanesecuebiq/datahub
|
f659cc89388cc6322795c7ba7758d65775a7dedf
|
[
"Apache-2.0"
] | null | null | null |
metadata-ingestion/tests/unit/test_snowflake_source.py
|
LucaBassanesecuebiq/datahub
|
f659cc89388cc6322795c7ba7758d65775a7dedf
|
[
"Apache-2.0"
] | 1
|
2022-02-02T17:26:16.000Z
|
2022-02-02T17:26:16.000Z
|
metadata-ingestion/tests/unit/test_snowflake_source.py
|
liftoffio/datahub
|
ff163059587a137a22fa0c655f568a40eda44236
|
[
"Apache-2.0"
] | 1
|
2022-01-27T04:44:22.000Z
|
2022-01-27T04:44:22.000Z
|
def test_snowflake_uri_default_authentication():
from datahub.ingestion.source.sql.snowflake import SnowflakeConfig
config = SnowflakeConfig.parse_obj(
{
"username": "user",
"password": "password",
"host_port": "acctname",
"database": "demo",
"warehouse": "COMPUTE_WH",
"role": "sysadmin",
}
)
assert (
config.get_sql_alchemy_url()
== "snowflake://user:password@acctname/?authenticator=SNOWFLAKE&warehouse=COMPUTE_WH&role"
"=sysadmin&application=acryl_datahub"
)
def test_snowflake_uri_external_browser_authentication():
from datahub.ingestion.source.sql.snowflake import SnowflakeConfig
config = SnowflakeConfig.parse_obj(
{
"username": "user",
"host_port": "acctname",
"database": "demo",
"warehouse": "COMPUTE_WH",
"role": "sysadmin",
"authentication_type": "EXTERNAL_BROWSER_AUTHENTICATOR",
}
)
assert (
config.get_sql_alchemy_url()
== "snowflake://user@acctname/?authenticator=EXTERNALBROWSER&warehouse=COMPUTE_WH&role"
"=sysadmin&application=acryl_datahub"
)
def test_snowflake_uri_key_pair_authentication():
from datahub.ingestion.source.sql.snowflake import SnowflakeConfig
config = SnowflakeConfig.parse_obj(
{
"username": "user",
"host_port": "acctname",
"database": "demo",
"warehouse": "COMPUTE_WH",
"role": "sysadmin",
"authentication_type": "KEY_PAIR_AUTHENTICATOR",
"private_key_path": "/a/random/path",
"private_key_password": "a_random_password",
}
)
assert (
config.get_sql_alchemy_url()
== "snowflake://user@acctname/?authenticator=SNOWFLAKE_JWT&warehouse=COMPUTE_WH&role"
"=sysadmin&application=acryl_datahub"
)
| 30.65625
| 98
| 0.611621
| 175
| 1,962
| 6.565714
| 0.268571
| 0.083551
| 0.093995
| 0.114883
| 0.795474
| 0.795474
| 0.795474
| 0.795474
| 0.713664
| 0.713664
| 0
| 0
| 0.268603
| 1,962
| 63
| 99
| 31.142857
| 0.800697
| 0
| 0
| 0.566038
| 0
| 0
| 0.377676
| 0.205912
| 0
| 0
| 0
| 0
| 0.056604
| 1
| 0.056604
| false
| 0.056604
| 0.056604
| 0
| 0.113208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
075406a75fe60cbd2a03752f032a68031cbface8
| 23,346
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 4.1.0
# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import redfish_event_subscription
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
DESTINATION_INVALID = "The Parameter destination must have an HTTPS destination. The HTTP destination is not allowed"
SUBSCRIPTION_EXISTS = "No changes found to be applied."
SUBSCRIPTION_DELETED = "Successfully deleted the subscription."
SUBSCRIPTION_UNABLE_DEL = "Unable to delete the subscription."
SUBSCRIPTION_UNABLE_ADD = "Unable to add a subscription."
SUBSCRIPTION_ADDED = "Successfully added the subscription."
DESTINATION_MISMATCH = "No changes found to be applied."
EVENT_TYPE_INVALID = "value of event_type must be one of: Alert, MetricReport, got: Metricreport"
@pytest.fixture
def redfish_connection_mock(mocker, redfish_response_mock):
connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_event_subscription.Redfish')
redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock
return redfish_connection_mock_obj
class TestRedfishSubscription(FakeAnsibleModule):
module = redfish_event_subscription
@pytest.mark.parametrize("val", [{"destination": "https://192.168.1.100:8188"},
{"destination": "https://192.168.1.100:8189"}])
def test_function_get_subscription_success(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": val["destination"]})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data1 = {
"@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination",
"@odata.id": "/redfish/v1/EventService/Subscriptions/c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80",
"@odata.type": "#EventDestination.v1_6_0.EventDestination",
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
"Destination": "https://192.168.1.100:8189",
"EventFormatType": "Event",
"EventTypes": [
"Alert"
],
"EventTypes@odata.count": 1,
"HttpHeaders": [],
"HttpHeaders@odata.count": 0,
"Id": "c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80",
"MetricReportDefinitions": [],
"MetricReportDefinitions@odata.count": 0,
"Name": "EventSubscription c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80",
"OriginResources": [],
"OriginResources@odata.count": 0,
"Protocol": "Redfish",
"Status": {
"Health": "OK",
"HealthRollup": "OK",
"State": "Enabled"
},
"SubscriptionType": "RedfishEvent"
}
json_data2 = {
"@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination",
"@odata.id": "/redfish/v1/EventService/Subscriptions/c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"@odata.type": "#EventDestination.v1_6_0.EventDestination",
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"EventTypes": [
"MetricReport"
],
"EventTypes@odata.count": 1,
"HttpHeaders": [],
"HttpHeaders@odata.count": 0,
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"MetricReportDefinitions": [],
"MetricReportDefinitions@odata.count": 0,
"Name": "EventSubscription c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"OriginResources": [],
"OriginResources@odata.count": 0,
"Protocol": "Redfish",
"Status": {
"Health": "OK",
"HealthRollup": "OK",
"State": "Enabled"
},
"SubscriptionType": "RedfishEvent"
}
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription_details',
side_effect=[json_data1, json_data2])
redfish_response_mock.json_data = {
"@odata.context": "/redfish/v1/$metadata#EventDestinationCollection.EventDestinationCollection",
"@odata.id": "/redfish/v1/EventService/Subscriptions",
"@odata.type": "#EventDestinationCollection.EventDestinationCollection",
"Description": "List of Event subscriptions",
"Members": [
{
"@odata.id": "/redfish/v1/EventService/Subscriptions/c6ff37fc-8204-11eb-b08f-2cea7ff7fe80"
},
{
"@odata.id": "/redfish/v1/EventService/Subscriptions/c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80"
}
],
"Members@odata.count": 2,
"Name": "Event Subscriptions Collection"
}
redfish_response_mock.success = True
f_module = self.get_module_mock(params=redfish_default_args)
result = self.module.get_subscription(redfish_connection_mock, val["destination"])
assert result["Destination"] == val["destination"]
@pytest.mark.parametrize("val", [
{"destination": "https://192.168.1.100:8188", "event_type": "MetricReport",
"event_format_type": "MetricReport"},
{"destination": "https://192.168.1.100:8188", "event_type": "Alert", "event_format_type": "Event"}])
def test_function_create_subscription(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": val["destination"]})
redfish_default_args.update({"event_type": val["event_type"]})
redfish_default_args.update({"event_format_type": val["event_format_type"]})
redfish_response_mock.json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"Destination": val["destination"],
"EventFormatType": val["event_format_type"],
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": [val["event_type"]],
"SubscriptionType": "RedfishEvent"
}
redfish_response_mock.success = True
f_module = self.get_module_mock(params=redfish_default_args)
result = self.module.create_subscription(redfish_connection_mock, f_module)
assert result.json_data["Destination"] == val["destination"]
assert result.json_data["EventFormatType"] == val["event_format_type"]
assert result.json_data["EventTypes"] == [val["event_type"]]
@pytest.mark.parametrize("val", [
{"destination": "https://100.96.80.1:161", "event_type": "MetricReport",
"event_format_type": "MetricReport"},
{"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}])
def test_function_get_subscription_details(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": val["destination"]})
redfish_default_args.update({"event_type": val["event_type"]})
redfish_default_args.update({"event_format_type": val["event_format_type"]})
redfish_response_mock.json_data = {
"@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination",
"@odata.id": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"@odata.type": "#EventDestination.v1_9_0.EventDestination",
"Actions": {
"#EventDestination.ResumeSubscription": {
"target": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6/Actions/EventDestination.ResumeSubscription"
}
},
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
"Destination": val['destination'],
"EventFormatType": val["event_format_type"],
"EventTypes": [val["event_type"]],
"EventTypes@odata.count": 1,
"HttpHeaders": [],
"HttpHeaders@odata.count": 0,
"Id": "087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"Name": "EventSubscription 087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"Protocol": "Redfish",
"Status": {
"Health": "OK",
"HealthRollup": "OK",
"State": "Enabled"
},
"SubscriptionType": "RedfishEvent"
}
redfish_response_mock.success = True
result = self.module.get_subscription_details(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80")
assert result["Destination"] == val["destination"]
assert result["EventFormatType"] == val["event_format_type"]
assert result["EventTypes"] == [val["event_type"]]
@pytest.mark.parametrize("val", [
{"destination": "https://100.96.80.1:161", "event_type": "MetricReport",
"event_format_type": "MetricReport"},
{"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}])
def test_function_get_subscription_details_None(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": val["destination"]})
redfish_default_args.update({"event_type": val["event_type"]})
redfish_default_args.update({"event_format_type": val["event_format_type"]})
redfish_response_mock.json_data = {
"@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination",
"@odata.id": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"@odata.type": "#EventDestination.v1_9_0.EventDestination",
"Actions": {
"#EventDestination.ResumeSubscription": {
"target": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6/Actions/EventDestination.ResumeSubscription"
}
},
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
"Destination": val['destination'],
"EventFormatType": val["event_format_type"],
"EventTypes": [val["event_type"]],
"EventTypes@odata.count": 1,
"HttpHeaders": [],
"HttpHeaders@odata.count": 0,
"Id": "087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"Name": "EventSubscription 087b9026-0afa-11ec-8120-4cd98f5fc5a6",
"Protocol": "Redfish",
"Status": {
"Health": "OK",
"HealthRollup": "OK",
"State": "Enabled"
},
"SubscriptionType": "RedfishEvent"
}
redfish_response_mock.success = False
result = self.module.get_subscription_details(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80")
assert result is None
@pytest.mark.parametrize("val", [
{"destination": "https://100.96.80.1:161"},
{"destination": "https://100.96.80.1:161"}])
def test_function_delete_subscription(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": val["destination"]})
redfish_response_mock.json_data = {
"@Message.ExtendedInfo": [
{
"Message": "Successfully Completed Request",
"MessageArgs": [],
"MessageArgs@odata.count": 0,
"MessageId": "Base.1.7.Success",
"RelatedProperties": [],
"RelatedProperties@odata.count": 0,
"Resolution": "None",
"Severity": "OK"
},
{
"Message": "The operation successfully completed.",
"MessageArgs": [],
"MessageArgs@odata.count": 0,
"MessageId": "IDRAC.2.4.SYS413",
"RelatedProperties": [],
"RelatedProperties@odata.count": 0,
"Resolution": "No response action is required.",
"Severity": "Informational"
}
]
}
redfish_response_mock.success = True
result = self.module.delete_subscription(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80")
assert result.json_data["@Message.ExtendedInfo"][0]["Message"] == "Successfully Completed Request"
assert result.json_data["@Message.ExtendedInfo"][1]["Message"] == "The operation successfully completed."
def test_module_validation_input_params(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": "http://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
with pytest.raises(Exception) as err:
self._run_module(redfish_default_args)
assert err.value.args[0]['msg'] == DESTINATION_INVALID
def test_module_absent_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
redfish_connection_mock.patch(
MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None)
redfish_response_mock.success = True
result = self._run_module(redfish_default_args)
assert result["msg"] == DESTINATION_MISMATCH
def test_module_absent_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
redfish_response_mock.success = True
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data)
mocker.patch(MODULE_PATH + 'redfish_event_subscription.delete_subscription', return_value=redfish_response_mock)
f_module = self.get_module_mock()
result = self._run_module(redfish_default_args)
print(result)
assert result["msg"] == SUBSCRIPTION_DELETED
def test_module_absent_does_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
redfish_response_mock.success = False
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data)
mocker.patch(MODULE_PATH + 'redfish_event_subscription.delete_subscription', return_value=redfish_response_mock)
with pytest.raises(Exception) as err:
self._run_module(redfish_default_args)
assert err.value.args[0]['msg'] == SUBSCRIPTION_UNABLE_DEL
def test_module_present_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None)
create_subscription_response_mock = redfish_response_mock
create_subscription_response_mock.json_data = json_data
mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription',
return_value=create_subscription_response_mock)
f_module = self.get_module_mock()
redfish_response_mock.success = True
result = self._run_module(redfish_default_args)
print(result)
assert result["msg"] == SUBSCRIPTION_ADDED
def test_module_present_does_not_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None)
create_subscription_response_mock = redfish_response_mock
create_subscription_response_mock.json_data = json_data
mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription',
return_value=create_subscription_response_mock)
redfish_response_mock.success = False
with pytest.raises(Exception) as err:
self._run_module(redfish_default_args)
assert err.value.args[0]['msg'] == SUBSCRIPTION_UNABLE_ADD
def test_module_present_does_not_exist_error_wrong_input(self, mocker, redfish_connection_mock,
redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "Metricreport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None)
create_subscription_response_mock = redfish_response_mock
create_subscription_response_mock.json_data = json_data
mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription',
return_value=create_subscription_response_mock)
f_module = self.get_module_mock()
redfish_response_mock.success = True
with pytest.raises(Exception) as err:
self._run_module(redfish_default_args)
print(err)
assert err.value.args[0]['msg'] == EVENT_TYPE_INVALID
def test_module_present_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
"Destination": "https://192.168.1.100:8188",
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
"EventTypes": ["MetricReport"],
"SubscriptionType": "RedfishEvent"
}
mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data)
redfish_response_mock.success = True
result = self._run_module(redfish_default_args)
assert result["msg"] == SUBSCRIPTION_EXISTS
| 51.536424
| 151
| 0.628545
| 2,223
| 23,346
| 6.316689
| 0.098965
| 0.072782
| 0.093576
| 0.085458
| 0.865475
| 0.831007
| 0.793049
| 0.772967
| 0.746973
| 0.736006
| 0
| 0.04666
| 0.249079
| 23,346
| 452
| 152
| 51.650442
| 0.754321
| 0.00998
| 0
| 0.687042
| 0
| 0.00489
| 0.357845
| 0.13153
| 0
| 0
| 0
| 0
| 0.04401
| 1
| 0.03423
| false
| 0
| 0.00978
| 0
| 0.051345
| 0.00978
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
075b1948aa6cf3300e55cd06ba2dcc855543dcbe
| 18,578
|
py
|
Python
|
chaosoci/core/compute/actions.py
|
LaudateCorpus1/chaostoolkit-oci
|
36da01a47dd1b0881ec21cb70775fde5011b38ed
|
[
"Apache-2.0"
] | 15
|
2018-11-20T15:36:52.000Z
|
2021-12-16T21:46:56.000Z
|
chaosoci/core/compute/actions.py
|
LaudateCorpus1/chaostoolkit-oci
|
36da01a47dd1b0881ec21cb70775fde5011b38ed
|
[
"Apache-2.0"
] | 21
|
2018-11-26T19:11:52.000Z
|
2021-12-15T19:38:37.000Z
|
chaosoci/core/compute/actions.py
|
LaudateCorpus1/chaostoolkit-oci
|
36da01a47dd1b0881ec21cb70775fde5011b38ed
|
[
"Apache-2.0"
] | 8
|
2018-11-20T15:37:09.000Z
|
2021-07-28T20:27:19.000Z
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from random import choice
from typing import Any, Dict, List
import oci
from chaoslib.exceptions import ActivityFailed, FailedActivity
from chaoslib.types import Configuration, Secrets
from oci.retry import DEFAULT_RETRY_STRATEGY
from chaosoci import oci_client
from chaosoci.types import OCIResponse
from logzero import logger
from oci.config import from_file
from oci.core import ComputeClient, ComputeManagementClient
from .common import (filter_instances,
get_instances, get_instance_pools, filter_instance_pools)
__all__ = ["stop_instance", "stop_random_instance", "stop_instances_in_compartment",
"start_instance_pool", "start_all_instance_pools_in_compartment",
"stop_instance_pool", "stop_all_instance_pools_in_compartment",
"terminate_instance_pool", "terminate_all_instance_pools_in_compartment",
"reset_instance_pool", "reset_all_instance_pools_in_compartment",
"softreset_instance_pool", "softreset_all_instance_pools_in_compartment"]
# Compute Client Actions
def stop_instance(instance_id: str, force: bool = False,
                  configuration: Configuration = None,
                  secrets: Secrets = None) -> OCIResponse:
    """Stop a given Compute instance.

    A forced stop issues the "STOP" action; otherwise a graceful "SOFTSTOP"
    is requested. Returns the raw (non-deserialized) API response data.
    """
    compute = oci_client(ComputeClient, configuration, secrets,
                         skip_deserialization=True)
    requested_action = "SOFTSTOP"
    if force:
        requested_action = "STOP"
    response = compute.instance_action(instance_id=instance_id,
                                       action=requested_action)
    return response.data
def stop_random_instance(filters: List[Dict[str, Any]],
                         compartment_id: str = None,
                         force: bool = False,
                         configuration: Configuration = None,
                         secrets: Secrets = None) -> OCIResponse:
    """
    Stop a random compute instance within a given compartment.

    If filters are provided, the scope will be reduced to those instances
    matching the filters.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.Instance.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.

    Raises ActivityFailed when no compartment can be determined or when no
    instance matches the filters.
    """  # noqa: E501
    # Deserialized client: we need model objects to filter and read `.id`.
    client = oci_client(ComputeClient, configuration, secrets,
                        skip_deserialization=False)
    action = "STOP" if force else "SOFTSTOP"
    compartment_id = compartment_id or from_file().get('compartment')
    if compartment_id is None:
        raise ActivityFailed('We have not been able to find a compartment,'
                             ' without one, we cannot continue.')
    instances = get_instances(client, compartment_id)
    if filters:
        instances = filter_instances(instances, filters=filters)
    if not instances:
        # BUG FIX: choice() on an empty list raised a bare IndexError; fail
        # with a meaningful chaostoolkit error instead.
        raise ActivityFailed('No instances found matching the filters: %s'
                             % str(filters))
    instance_id = choice(instances).id
    # Second, skip-deserialization client for the raw action response.
    s_client = oci_client(ComputeClient, configuration, secrets,
                          skip_deserialization=True)
    ret = s_client.instance_action(instance_id=instance_id, action=action)
    return ret.data
def stop_instances_in_compartment(filters: List[Dict[str, Any]],
                                  instances_ids: List[str] = None,
                                  configuration: Configuration = None,
                                  compartment_id: str = None,
                                  secrets: Secrets = None) -> OCIResponse:
    """Stop the given OCI Compute instances, If only an Compartment is specified, all instances in
    that Compartment will be stopped. If you need more control, you can
    also provide a list of filters following the documentation.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.Instance.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section."""
    client = oci_client(ComputeClient, configuration, secrets,
                        skip_deserialization=True)
    if not instances_ids:
        # No explicit ids: discover instances in the compartment and narrow
        # them down with the filters.
        logger.warning('Based on configuration provided I am going to '
                       'stop all instances in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instances = get_instances(client, compartment_id)
        if filters:
            instances_ids = filter_instances(instances, filters=filters)
        if not instances_ids:
            raise FailedActivity(
                'No instances found matching filters: %s' % str(filters))
        # Stray "}" removed from the message; lazy %-args for logging.
        logger.debug('Instances in Compartment %s selected: %s.',
                     compartment_id, str(instances_ids))
    stop_instances_ret = []
    for instance_id in instances_ids:
        # BUG FIX: stdlib-style loggers use %-formatting; "{}" placeholders
        # were never interpolated by the original call.
        logger.debug("Picked Compute Instance '%s' from Compartment '%s' to be stopped",
                     instance_id, compartment_id)
        # BUG FIX: forward configuration and secrets; the original dropped
        # them, so non-default credentials were ignored for the actual stop.
        stop_instances_ret.append(
            stop_instance(instance_id, False, configuration, secrets))
    return stop_instances_ret
# Compute Client Management Actions
def stop_instance_pool(instance_pool_id: str,
                       configuration: Configuration = None,
                       secrets: Secrets = None) -> OCIResponse:
    """Stop the given OCI Compute instance pool and return the raw response data."""
    management = oci_client(ComputeManagementClient, configuration, secrets,
                            skip_deserialization=True)
    response = management.stop_instance_pool(instance_pool_id)
    return response.data
def stop_all_instance_pools_in_compartment(instance_pool_ids: List[str],
                                           filters: List[Dict[str, Any]],
                                           configuration: Configuration = None,
                                           compartment_id: str = None,
                                           secrets: Secrets = None) -> OCIResponse:
    """Stop the given OCI Compute instance pool. If you need more control, you can
    also provide a list of filters following the documentation.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.InstancePool.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.
    Returns the list of per-pool stop responses."""
    client = oci_client(ComputeManagementClient, configuration, secrets,
                        skip_deserialization=True)
    if not instance_pool_ids:
        # No explicit ids: discover pools in the compartment, then filter.
        logger.warning('Based on configuration provided I am going to '
                       'stop all Instance Pools in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instance_pools = get_instance_pools(client, compartment_id)
        if filters:
            instance_pool_ids = filter_instance_pools(instance_pools, filters=filters)
        if not instance_pool_ids:
            raise FailedActivity(
                'No Instance Pools found matching filters: %s' % str(filters))
        # Stray "}" removed; lazy %-args for logging.
        logger.debug('Instance Pools in Compartment %s selected: %s.',
                     compartment_id, str(instance_pool_ids))
    stop_instance_pool_response = []
    for instance_pool_id in instance_pool_ids:
        # BUG FIX: "%s" lazy formatting; "{}" placeholders were never interpolated.
        logger.debug("Picked Compute Instance Pool '%s' from Compartment '%s' to be stopped",
                     instance_pool_id, compartment_id)
        # BUG FIX: accumulate every response (the original reassigned the list,
        # returning only the last pool's response) and forward credentials.
        stop_instance_pool_response.append(
            stop_instance_pool(instance_pool_id, configuration, secrets))
    return stop_instance_pool_response
def start_instance_pool(instance_pool_id: str,
                        configuration: Configuration = None,
                        secrets: Secrets = None) -> OCIResponse:
    """Start the given OCI Compute instance pool and return the raw response data."""
    management = oci_client(ComputeManagementClient, configuration, secrets,
                            skip_deserialization=True)
    response = management.start_instance_pool(instance_pool_id)
    return response.data
def start_all_instance_pools_in_compartment(instance_pool_ids: List[str],
                                            filters: List[Dict[str, Any]],
                                            configuration: Configuration = None,
                                            compartment_id: str = None,
                                            secrets: Secrets = None) -> OCIResponse:
    """Start the given OCI Compute instance pools. If only a Compartment is
    specified, all pools in that Compartment matching the filters are started.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.InstancePool.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.
    Returns the list of per-pool start responses."""
    client = oci_client(ComputeManagementClient, configuration, secrets,
                        skip_deserialization=True)
    if not instance_pool_ids:
        logger.warning('Based on configuration provided I am going to '
                       'Start all Instance Pools in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instance_pools = get_instance_pools(client, compartment_id)
        if filters:
            instance_pool_ids = filter_instance_pools(instance_pools, filters=filters)
        if not instance_pool_ids:
            raise FailedActivity(
                'No Instance Pools found matching filters: %s' % str(filters))
        # Stray "}" removed; lazy %-args for logging.
        logger.debug('Instance Pools in Compartment %s selected: %s.',
                     compartment_id, str(instance_pool_ids))
    start_instance_pool_response = []
    for instance_pool_id in instance_pool_ids:
        # BUG FIX: "%s" lazy formatting; "{}" placeholders were never interpolated.
        logger.debug("Picked Compute Instance Pool '%s' from Compartment '%s' to be started",
                     instance_pool_id, compartment_id)
        # BUG FIX: accumulate every response (the original reassigned the list,
        # returning only the last pool's response) and forward credentials.
        start_instance_pool_response.append(
            start_instance_pool(instance_pool_id, configuration, secrets))
    return start_instance_pool_response
def terminate_instance_pool(instance_pool_id: str,
                            configuration: Configuration = None,
                            secrets: Secrets = None) -> OCIResponse:
    """Terminate the given OCI Compute instance pool and return the raw response data."""
    management = oci_client(ComputeManagementClient, configuration, secrets,
                            skip_deserialization=True)
    response = management.terminate_instance_pool(instance_pool_id)
    return response.data
def terminate_all_instance_pools_in_compartment(instance_pool_ids: List[str],
                                                filters: List[Dict[str, Any]],
                                                configuration: Configuration = None,
                                                compartment_id: str = None,
                                                secrets: Secrets = None) -> OCIResponse:
    """Terminate the given OCI Compute instance pools. If only a Compartment is
    specified, all pools in that Compartment matching the filters are terminated.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.InstancePool.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.
    Returns the list of per-pool terminate responses."""
    client = oci_client(ComputeManagementClient, configuration, secrets,
                        skip_deserialization=True)
    if not instance_pool_ids:
        logger.warning('Based on configuration provided I am going to '
                       'Terminate all Instance Pools in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instance_pools = get_instance_pools(client, compartment_id)
        if filters:
            instance_pool_ids = filter_instance_pools(instance_pools, filters=filters)
        if not instance_pool_ids:
            raise FailedActivity(
                'No Instance Pools found matching filters: %s' % str(filters))
        # Stray "}" removed; lazy %-args for logging.
        logger.debug('Instance Pools in Compartment %s selected: %s.',
                     compartment_id, str(instance_pool_ids))
    terminate_instance_pool_response = []
    for instance_pool_id in instance_pool_ids:
        # BUG FIX: "%s" lazy formatting; "{}" placeholders were never interpolated.
        logger.debug("Picked Compute Instance Pool '%s' from Compartment '%s' to be terminated",
                     instance_pool_id, compartment_id)
        # BUG FIX: accumulate every response (the original reassigned the list,
        # returning only the last pool's response) and forward credentials.
        terminate_instance_pool_response.append(
            terminate_instance_pool(instance_pool_id, configuration, secrets))
    return terminate_instance_pool_response
def reset_instance_pool(instance_pool_id: str,
                        configuration: Configuration = None,
                        secrets: Secrets = None) -> OCIResponse:
    """Reset the given OCI Compute instance pool and return the raw response data."""
    management = oci_client(ComputeManagementClient, configuration, secrets,
                            skip_deserialization=True)
    response = management.reset_instance_pool(instance_pool_id)
    return response.data
def reset_all_instance_pools_in_compartment(instance_pool_ids: List[str],
                                            filters: List[Dict[str, Any]],
                                            configuration: Configuration = None,
                                            compartment_id: str = None,
                                            secrets: Secrets = None) -> OCIResponse:
    """Reset the given OCI Compute Instance Pools. If only a Compartment is
    specified, all pools in that Compartment matching the filters are reset.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.InstancePool.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.
    Returns the list of per-pool reset responses."""
    client = oci_client(ComputeManagementClient, configuration, secrets,
                        skip_deserialization=True)
    if not instance_pool_ids:
        logger.warning('Based on configuration provided I am going to '
                       'Reset all Instance Pools in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instance_pools = get_instance_pools(client, compartment_id)
        if filters:
            instance_pool_ids = filter_instance_pools(instance_pools, filters=filters)
        if not instance_pool_ids:
            # Message aligned with the sibling *_all_* helpers ("Instance
            # Pools", not "instances").
            raise FailedActivity(
                'No Instance Pools found matching filters: %s' % str(filters))
        # Stray "}" removed; lazy %-args for logging.
        logger.debug('Instance Pools in Compartment %s selected: %s.',
                     compartment_id, str(instance_pool_ids))
    reset_instance_pool_response = []
    for instance_pool_id in instance_pool_ids:
        # BUG FIX: "%s" lazy formatting; "{}" placeholders were never interpolated.
        logger.debug("Picked Compute Instance Pool '%s' from Compartment '%s' to be reset",
                     instance_pool_id, compartment_id)
        # BUG FIX: accumulate every response (the original reassigned the list,
        # returning only the last pool's response) and forward credentials.
        reset_instance_pool_response.append(
            reset_instance_pool(instance_pool_id, configuration, secrets))
    return reset_instance_pool_response
def softreset_instance_pool(instance_pool_id: str,
                            configuration: Configuration = None,
                            secrets: Secrets = None) -> OCIResponse:
    """Soft reset the given OCI Compute instance pool and return the raw response data."""
    management = oci_client(ComputeManagementClient, configuration, secrets,
                            skip_deserialization=True)
    response = management.softreset_instance_pool(instance_pool_id)
    return response.data
def softreset_all_instance_pools_in_compartment(instance_pool_ids: List[str],
                                                filters: List[Dict[str, Any]],
                                                configuration: Configuration = None,
                                                compartment_id: str = None,
                                                secrets: Secrets = None) -> OCIResponse:
    """SoftReset the given OCI Compute Instance Pools. If only a Compartment is
    specified, all pools in that Compartment matching the filters are soft reset.
    Please refer to: https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/core/models/oci.core.models.InstancePool.html#oci.core.models.Instance
    for details on the available filters under the 'parameters' section.
    Returns the list of per-pool softreset responses."""
    client = oci_client(ComputeManagementClient, configuration, secrets,
                        skip_deserialization=True)
    if not instance_pool_ids:
        # BUG FIX: the warning said "terminate" (copy-paste from the
        # terminate helper) although this action soft-resets.
        logger.warning('Based on configuration provided I am going to '
                       'softreset all Instance Pools in the Compartment %s! matching the filter criteria'
                       % compartment_id)
        compartment_id = compartment_id or from_file().get('compartment')
        instance_pools = get_instance_pools(client, compartment_id)
        if filters:
            instance_pool_ids = filter_instance_pools(instance_pools, filters=filters)
        if not instance_pool_ids:
            raise FailedActivity(
                'No Instance Pools found matching filters: %s' % str(filters))
        # Stray "}" removed; lazy %-args for logging.
        logger.debug('Instance Pools in Compartment %s selected: %s.',
                     compartment_id, str(instance_pool_ids))
    softreset_instance_pool_response = []
    for instance_pool_id in instance_pool_ids:
        # BUG FIX: message said "to be stopped"; also use "%s" lazy formatting.
        logger.debug("Picked Compute Instance Pool '%s' from Compartment '%s' to be softreset",
                     instance_pool_id, compartment_id)
        # BUG FIX: accumulate every response (the original reassigned the list,
        # returning only the last pool's response) and forward credentials.
        softreset_instance_pool_response.append(
            softreset_instance_pool(instance_pool_id, configuration, secrets))
    return softreset_instance_pool_response
| 44.338902
| 167
| 0.655991
| 2,059
| 18,578
| 5.70374
| 0.074794
| 0.110354
| 0.038317
| 0.026056
| 0.856437
| 0.833447
| 0.763028
| 0.747275
| 0.738249
| 0.701379
| 0
| 0.000297
| 0.275649
| 18,578
| 418
| 168
| 44.444976
| 0.872408
| 0.182689
| 0
| 0.694118
| 0
| 0
| 0.147896
| 0.01847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05098
| false
| 0
| 0.05098
| 0
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ad651047c08a560c093e713ff4bac283fe4b28d
| 24
|
py
|
Python
|
lib/rpn/__init__.py
|
brijml/Object-Detection-Faster-RCNN
|
86e115575767584584ab4e5572bc4abed8180eb5
|
[
"MIT"
] | null | null | null |
lib/rpn/__init__.py
|
brijml/Object-Detection-Faster-RCNN
|
86e115575767584584ab4e5572bc4abed8180eb5
|
[
"MIT"
] | null | null | null |
lib/rpn/__init__.py
|
brijml/Object-Detection-Faster-RCNN
|
86e115575767584584ab4e5572bc4abed8180eb5
|
[
"MIT"
] | null | null | null |
from train_rpn import *
| 12
| 23
| 0.791667
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4aea9800cc4c41124456eba9678e1f1f4020bd93
| 36,231
|
py
|
Python
|
packages/pegasus-python/src/Pegasus/service/monitoring/views.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 127
|
2015-01-28T19:19:13.000Z
|
2022-03-31T05:57:40.000Z
|
packages/pegasus-python/src/Pegasus/service/monitoring/views.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 14
|
2015-04-15T17:44:20.000Z
|
2022-02-22T22:48:49.000Z
|
packages/pegasus-python/src/Pegasus/service/monitoring/views.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 70
|
2015-01-22T15:20:32.000Z
|
2022-02-21T22:50:23.000Z
|
import hashlib
import logging
from flask import g, make_response, request
from Pegasus.service import cache
from Pegasus.service._serialize import jsonify
from Pegasus.service.base import OrderedDict
from Pegasus.service.monitoring import monitoring as blueprint
from Pegasus.service.monitoring.queries import (
MasterWorkflowQueries,
StampedeWorkflowQueries,
)
log = logging.getLogger(__name__)
JSON_HEADER = {"Content-Type": "application/json"}
@blueprint.url_value_preprocessor
def pull_m_wf_id(endpoint, values):
    """
    If the requested endpoint contains a value for m_wf_id variable then extract it and set it in g.m_wf_id.
    """
    if not values:
        return
    if "m_wf_id" in values:
        g.m_wf_id = values["m_wf_id"]
@blueprint.url_value_preprocessor
def pull_url_context(endpoint, values):
    """
    Create a context which can be used when generating url in link section of the responses.
    """
    # Collect the identifier URL variables (if present) into one dict so the
    # link-generation code can reuse them later in the request.
    url_context = {}
    keys = ["wf_id", "job_id", "task_id", "job_instance_id", "host_id", "instance_id"]
    if values:
        for key in keys:
            if key in values:
                url_context[key] = values[key]
        # NOTE(review): this is a for/else, not an if/else. Since the loop has
        # no `break`, the else branch always runs after the scan completes, so
        # the net effect is simply "publish the context if any key matched".
        # Confirm the break-less for/else is intentional before restructuring.
        else:
            if url_context:
                g.url_context = url_context
@blueprint.before_request
def compute_stampede_db_url():
    """
    If the requested endpoint requires connecting to a STAMPEDE database, then determine STAMPEDE DB URL and store it
    in g.stampede_db_url. Also, set g.m_wf_id to be the root workflow's uuid
    """
    # Only /workflow endpoints that carry an m_wf_id need a STAMPEDE database.
    if "/workflow" not in request.path or "m_wf_id" not in g:
        return
    # Namespace cache keys with md5(master_db_url) so different master
    # databases never share cached root-workflow records.
    md5sum = hashlib.md5()
    md5sum.update(g.master_db_url.encode("utf-8"))
    m_wf_id = g.m_wf_id
    def _get_cache_key(key_suffix):
        # Key shape: "<md5(master_db_url)>.<key_suffix>".
        return "{}.{}".format(md5sum.hexdigest(), key_suffix)
    cache_key = _get_cache_key(m_wf_id)
    if cache.get(cache_key):
        log.debug("Cache Hit: compute_stampede_db_url %s" % cache_key)
        root_workflow = cache.get(cache_key)
    else:
        log.debug("Cache Miss: compute_stampede_db_url %s" % cache_key)
        queries = MasterWorkflowQueries(g.master_db_url)
        root_workflow = queries.get_root_workflow(m_wf_id)
        queries.close()
        # Cache the record under both identifiers (wf_id and wf_uuid) so a
        # later request using either form hits the cache.
        cache.set(_get_cache_key(root_workflow.wf_id), root_workflow, timeout=600)
        cache.set(_get_cache_key(root_workflow.wf_uuid), root_workflow, timeout=600)
    # Normalize: g.m_wf_id always ends up as the root workflow's uuid.
    g.url_m_wf_id = root_workflow.wf_id
    g.m_wf_id = root_workflow.wf_uuid
    g.stampede_db_url = root_workflow.db_url
@blueprint.before_request
def get_query_args():
    """
    Parse the supported request query arguments into g.query_args, casting
    each to its expected type. Invalid values raise ValueError tagged with
    e.codes = ("INVALID_QUERY_ARGUMENT", 400) for the error handler.
    """
    g.query_args = {}
    def to_int(q_arg, value):
        # Cast helper: raises a tagged ValueError on non-integer input.
        try:
            return int(value)
        except ValueError as e:
            log.exception(
                "Query Argument {} = {} is not a valid int".format(q_arg, value)
            )
            e = ValueError(
                "Expecting integer for argument {}, found {!r}".format(
                    q_arg, str(value)
                )
            )
            e.codes = ("INVALID_QUERY_ARGUMENT", 400)
            raise e from None
    def to_str(q_arg, value):
        # Identity cast for string-valued arguments.
        return value
    def to_bool(q_arg, value):
        # Accepts "1"/"true" and "0"/"false" (case-insensitive); anything
        # else raises a tagged ValueError.
        value = value.strip().lower()
        if value in {"1", "true"}:
            return True
        elif value in {"0", "false"}:
            return False
        else:
            log.exception(
                "Query Argument {} = {} is not a valid boolean".format(q_arg, value)
            )
            e = ValueError(
                "Expecting boolean for argument {}, found {!r}".format(
                    q_arg, str(value)
                )
            )
            e.codes = ("INVALID_QUERY_ARGUMENT", 400)
            raise e
    # Supported query arguments and their cast functions.
    query_args = OrderedDict(
        [
            ("pretty-print", to_bool),
            ("start-index", to_int),
            ("max-results", to_int),
            ("query", to_str),
            ("order", to_str),
        ]
    )
    for arg, cast in query_args.items():
        if arg in request.args:
            # Stored with underscores (e.g. "start-index" -> "start_index")
            # so the values can be splatted as keyword arguments.
            g.query_args[arg.replace("-", "_")] = cast(arg, request.args.get(arg))
"""
Root Workflow
{
"wf_id" : int:wf_id,
"wf_uuid" : string:wf_uuid,
"submit_hostname" : string:submit_hostname,
"submit_dir" : string:submit_dir,
"planner_arguments" : string:planner_arguments,
"planner_version" : string:planner_version,
"user" : string:user,
"grid_dn" : string:grid_dn,
"dax_label" : string:dax_label,
"dax_version" : string:dax_version,
"dax_file" : string:dax_file,
"dag_file_name" : string:dag_file_name,
"timestamp" : int:timestamp,
"workflow_state" : object:workflow_state,
"_links" : {
"workflow" : href:workflow
}
}
"""
@blueprint.route("/root")
def get_root_workflows(username):
    """
    Returns a collection of root level workflows.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response.

    :statuscode 200: OK
    :statuscode 204: No content; when no workflows found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Root Workflow
    """
    queries = MasterWorkflowQueries(g.master_db_url)
    paged = queries.get_root_workflows(**g.query_args)
    if paged.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    # Serialize the paged collection and return it as JSON.
    return make_response(jsonify(paged), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>")
def get_root_workflow(username, m_wf_id):
    """
    Returns root level workflow identified by m_wf_id.

    :query boolean pretty-print: Return formatted JSON response.

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Root Workflow
    """
    queries = MasterWorkflowQueries(g.master_db_url)
    record = queries.get_root_workflow(m_wf_id)
    # Serialize the single record and return it as JSON.
    return make_response(jsonify(record), 200, JSON_HEADER)
"""
Workflow
{
"wf_id" : int:wf_id,
"root_wf_id" : int:root_wf_id,
"parent_wf_id" : int:parent_wf_id,
"wf_uuid" : string:wf_uuid,
"submit_hostname" : string:submit_hostname,
"submit_dir" : string:submit_dir,
"planner_arguments" : string:planner_arguments,
"planner_version" : string:planner_version,
"user" : string:user,
"grid_dn" : string:grid_dn,
"dax_label" : string:dax_label,
"dax_version" : string:dax_version,
"dax_file" : string:dax_file,
"dag_file_name" : string:dag_file_name,
"timestamp" : int:timestamp,
"_links" : {
"workflow_meta" : href:workflow_meta,
"workflow_state" : href:workflow_state,
"job" : href:job,
"task" : href:task,
"host" : href:host,
"invocation" : href:invocation
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow")
def get_workflows(username, m_wf_id):
    """
    Returns a collection of workflows.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflows found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Workflow
    """
    # g.m_wf_id is the root workflow's uuid (set by compute_stampede_db_url).
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    paged = queries.get_workflows(g.m_wf_id, **g.query_args)
    if paged.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    return make_response(jsonify(paged), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>")
def get_workflow(username, m_wf_id, wf_id):
    """
    Fetch the single workflow identified by m_wf_id, wf_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Workflow
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_workflow(wf_id)), 200, JSON_HEADER)
"""
Workflow Meta
{
"key" : string:key,
"value" : string:value,
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/meta")
def get_workflow_meta(username, m_wf_id, wf_id):
    """
    Fetch the workflow's metadata entries as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflow metadata found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowMeta
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    # NOTE(review): sibling handlers (e.g. get_workflow_state) query by the
    # route's wf_id, but this one queries by g.m_wf_id — confirm intentional.
    page = db.get_workflow_meta(g.m_wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
"""
Workflow Files
{
"wf_id" : int:wf_id,
"lfn_id" : string:lfn_id,
"lfn" : string:lfn,
"pfns" : [
{
"pfn_id" : <int:pfn_id>
"pfn" : <string:pfn>
"site" : <string:site>
}
],
"meta" : [
{
"meta_id" : <int:meta_id>
"key" : <string:key>
"value" : <string:value>
}
],
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/files")
def get_workflow_files(username, m_wf_id, wf_id):
    """
    Returns a collection of the workflow's files (LFNs with their PFNs and metadata).

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflow files found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowFiles
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    # NOTE(review): sibling handlers query by the route's wf_id, but this one
    # queries by g.m_wf_id — confirm intentional before changing.
    paged_response = queries.get_workflow_files(g.m_wf_id, **g.query_args)

    if paged_response.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    #
    # Generate JSON Response
    #
    response_json = jsonify(paged_response)
    return make_response(response_json, 200, JSON_HEADER)
"""
Workflow State
{
"wf_id" : int:wf_id,
"state" : string:state,
"status" : int:status,
"restart_count" : int:restart_count,
"timestamp" : datetime:timestamp,
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/state")
def get_workflow_state(username, m_wf_id, wf_id):
    """
    Fetch the workflow's state records as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflowstates found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowState
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_workflow_state(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
"""
Job
{
"job_id" : int: job_id,
"exec_job_id" : string: exec_job_id,
"submit_file" : string: submit_file,
"type_desc" : string: type_desc,
"max_retries" : int: max_retries,
"clustered" : bool: clustered,
"task_count" : int: task_count,
"executable" : string: executable,
"argv" : string: argv,
"task_count" : int:task_count,
"_links" : {
"workflow" : href:workflow,
"task" : href:task,
"job_instance" : href:job_instance
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job")
def get_workflow_jobs(username, m_wf_id, wf_id):
    """
    Fetch the workflow's jobs as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobs found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_workflow_jobs(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/<int:job_id>")
def get_job(username, m_wf_id, wf_id, job_id):
    """
    Fetch the single job identified by m_wf_id, wf_id, job_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_job(job_id)), 200, JSON_HEADER)
"""
Host
{
"host_id" : int:host_id,
"site" : string:site,
"hostname" : string:hostname,
"ip" : string:ip,
"uname" : string:uname,
"total_memory" : string:total_memory,
"_links" : {
"workflow" : href:workflow
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/host")
def get_workflow_hosts(username, m_wf_id, wf_id):
    """
    Fetch the hosts used by the workflow as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no hosts found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Host
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_workflow_hosts(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/host/<int:host_id>")
def get_host(username, m_wf_id, wf_id, host_id):
    """
    Fetch the single host identified by m_wf_id, wf_id, host_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Host
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_host(host_id)), 200, JSON_HEADER)
"""
Job State
{
"job_instance_id" : "<int:job_instance_id>",
"state" : "<string:state>",
"jobstate_submit_seq" : "<int:jobstate_submit_seq>",
"timestamp" : "<int:timestamp>",
"_links" : {
"job_instance" : "href:job_instance"
}
}
"""
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/job/<int:job_id>/job-instance/<int:job_instance_id>/state"
)
def get_job_instance_states(username, m_wf_id, wf_id, job_id, job_instance_id):
    """
    Fetch the state records of one job instance as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobstates found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: JobState
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_job_instance_states(
        wf_id, job_id, job_instance_id, **g.query_args
    )

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
"""
Task
{
"task_id" : int:task_id,
"abs_task_id" : string:abs_task_id,
"type_desc" : string: type_desc,
"transformation" : string:transformation,
"argv" : string:argv,
"task_count" : int:task_count,
"_links" : {
"workflow" : href:workflow,
"job" : href:job,
"task_meta" : href:task_meta
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/task")
def get_workflow_tasks(username, m_wf_id, wf_id):
    """
    Fetch the workflow's tasks as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no tasks found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Task
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_workflow_tasks(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/<int:job_id>/task")
def get_job_tasks(username, m_wf_id, wf_id, job_id):
    """
    Fetch the tasks belonging to one job as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no tasks found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Task
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_job_tasks(wf_id, job_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/task/<int:task_id>")
def get_task(username, m_wf_id, wf_id, task_id):
    """
    Fetch the single task identified by m_wf_id, wf_id, task_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Task
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_task(task_id)), 200, JSON_HEADER)
"""
Task Meta
{
"key" : string:key,
"value" : string:value,
"_links" : {
"task" : "<href:task>"
}
}
"""
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/task/<int:task_id>/meta"
)
def get_task_meta(username, m_wf_id, wf_id, task_id):
    """
    Fetch the task's metadata entries as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflow metadata found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: TaskMeta
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_task_meta(task_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
"""
Job Instance
{
"job_instance_id" : int:job_instance_id,
"host_id" : int:host_id,
"job_submit_seq" : int:job_submit_seq,
"sched_id" : string:sched_id,
"site" : string:site,
"user" : string:user,
"work_dir" : string:work_dir,
"cluster_start" : int:cluster_start,
"cluster_duration" : int:cluster_duration,
"local_duration" : int:local_duration,
"subwf_id" : int:subwf_id,
"stdout_text" : string:stdout_text,
"stderr_text" : string:stderr_text,
"stdin_file" : string:stdin_file,
"stdout_file" : string:stdout_file,
"stderr_file" : string:stderr_file,
"multiplier_factor" : int:multiplier_factor,
"exitcode" : int:exitcode,
"_links" : {
"job_state" : href:job_state,
"host" : href:host,
"invocation" : href:invocation,
"job" : href:job
}
}
"""
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/job/<int:job_id>/job-instance"
)
def get_job_instances(username, m_wf_id, wf_id, job_id):
    """
    Fetch the job's instances as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no job instances found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: JobInstance
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    # "?recent=true" narrows the result to the most recent instance(s).
    only_recent = request.args.get("recent", "false") == "true"
    page = db.get_job_instances(wf_id, job_id, recent=only_recent, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/job-instance/<int:job_instance_id>"
)
def get_job_instance(username, m_wf_id, wf_id, job_instance_id):
    """
    Fetch the single job instance identified by m_wf_id, wf_id, job_id, job_instance_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: JobInstance
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_job_instance(job_instance_id)), 200, JSON_HEADER)
"""
Invocation
{
"invocation_id" : int:invocation_id,
"job_instance_id" : int:job_instance_id,
"abs_task_id" : string:abs_task_id,
"task_submit_seq" : int:task_submit_seq,
"start_time" : int:start_time,
"remote_duration" : int:remote_duration,
"remote_cpu_time" : int:remote_cpu_time,
"exitcode" : int:exitcode,
"transformation" : string:transformation,
"executable" : string:executable,
"argv" : string:argv,
"_links" : {
"workflow" : href:workflow,
"job_instance" : href:job_instance
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/invocation")
def get_workflow_invocations(username, m_wf_id, wf_id):
    """
    Fetch the workflow's invocations as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no invocations found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Invocation
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_workflow_invocations(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/job/<int:job_id>/job-instance/<int:job_instance_id>/invocation"
)
def get_job_instance_invocations(username, m_wf_id, wf_id, job_id, job_instance_id):
    """
    Returns a collection of Invocations for one job instance.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no invocations found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Invocation
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    paged_response = queries.get_job_instance_invocations(
        wf_id, job_id, job_instance_id, **g.query_args
    )

    if paged_response.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    #
    # Generate JSON Response
    #
    response_json = jsonify(paged_response)
    return make_response(response_json, 200, JSON_HEADER)
@blueprint.route(
    "/root/<string:m_wf_id>/workflow/<string:wf_id>/invocation/<int:invocation_id>"
)
def get_invocation(username, m_wf_id, wf_id, invocation_id):
    """
    Fetch the single invocation identified by m_wf_id, wf_id, invocation_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Invocation
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    return make_response(jsonify(db.get_invocation(invocation_id)), 200, JSON_HEADER)
"""
Utilities
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/running")
def get_running_jobs(username, m_wf_id, wf_id):
    """
    Fetch the workflow's currently running jobs as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobs found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_running_jobs(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/successful")
def get_successful_jobs(username, m_wf_id, wf_id):
    """
    Fetch the workflow's successful jobs as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobs found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_successful_jobs(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/failed")
def get_failed_jobs(username, m_wf_id, wf_id):
    """
    Fetch the workflow's failed jobs as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobs found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_failed_jobs(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/job/failing")
def get_failing_jobs(username, m_wf_id, wf_id):
    """
    Fetch the workflow's failing jobs as a paged collection.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no jobs found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Job
    """
    db = StampedeWorkflowQueries(g.stampede_db_url)
    page = db.get_failing_jobs(wf_id, **g.query_args)

    if not page.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)

    return make_response(jsonify(page), 200, JSON_HEADER)
| 29.28941
| 117
| 0.670062
| 4,548
| 36,231
| 5.134125
| 0.058267
| 0.026039
| 0.015632
| 0.008394
| 0.829165
| 0.815717
| 0.798801
| 0.775332
| 0.763854
| 0.744968
| 0
| 0.021308
| 0.230576
| 36,231
| 1,236
| 118
| 29.313107
| 0.8163
| 0.345478
| 0
| 0.472222
| 0
| 0.012346
| 0.17482
| 0.096065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101852
| false
| 0
| 0.024691
| 0.006173
| 0.277778
| 0.095679
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab0b6dc773684d4f3bd83820371c20c62a35f06b
| 100
|
py
|
Python
|
tests/test_elliptic_moab.py
|
padmec-reservoir/elliptic_moab
|
a3b15f29a621c35a8279fd163326a0895aa67f30
|
[
"MIT"
] | null | null | null |
tests/test_elliptic_moab.py
|
padmec-reservoir/elliptic_moab
|
a3b15f29a621c35a8279fd163326a0895aa67f30
|
[
"MIT"
] | null | null | null |
tests/test_elliptic_moab.py
|
padmec-reservoir/elliptic_moab
|
a3b15f29a621c35a8279fd163326a0895aa67f30
|
[
"MIT"
] | null | null | null |
"""Tests for `elliptic_moab` package."""
import pytest
from elliptic_moab import elliptic_moab
| 11.111111
| 40
| 0.76
| 13
| 100
| 5.615385
| 0.615385
| 0.493151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 100
| 8
| 41
| 12.5
| 0.858824
| 0.34
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab40272165d81a1f866b3a624c48985c6ede0962
| 63
|
py
|
Python
|
club_poker/www/design.py
|
bobzz-zone/Rico
|
9b97ea80d25e42e3c4dd55485d6f05172ec9ebac
|
[
"MIT"
] | null | null | null |
club_poker/www/design.py
|
bobzz-zone/Rico
|
9b97ea80d25e42e3c4dd55485d6f05172ec9ebac
|
[
"MIT"
] | null | null | null |
club_poker/www/design.py
|
bobzz-zone/Rico
|
9b97ea80d25e42e3c4dd55485d6f05172ec9ebac
|
[
"MIT"
] | null | null | null |
import frappe
def get_context(context):
    """Attach template variables to *context* before the page renders.

    Currently sets only a placeholder ``test`` attribute.
    """
    setattr(context, "test", "Hello")
| 12.6
| 25
| 0.777778
| 9
| 63
| 5.333333
| 0.777778
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 63
| 5
| 26
| 12.6
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab4f5a5c21ca21f3c5b799e2e069bb8573b00484
| 1,175
|
py
|
Python
|
tests/base/dataloaders.py
|
Kshitij09/pytorch-lightning
|
63bd0582e35ad865c1f07f61975456f65de0f41f
|
[
"Apache-2.0"
] | null | null | null |
tests/base/dataloaders.py
|
Kshitij09/pytorch-lightning
|
63bd0582e35ad865c1f07f61975456f65de0f41f
|
[
"Apache-2.0"
] | null | null | null |
tests/base/dataloaders.py
|
Kshitij09/pytorch-lightning
|
63bd0582e35ad865c1f07f61975456f65de0f41f
|
[
"Apache-2.0"
] | null | null | null |
"""Custom dataloaders for testing"""
class CustomInfDataloader:
    """Endlessly recycle a dataloader, capping each epoch at 50 items."""

    def __init__(self, dataloader):
        self.dataloader = dataloader
        self.iter = iter(dataloader)
        self.count = 0

    def __iter__(self):
        # A new epoch resets the 50-item budget.
        self.count = 0
        return self

    def __next__(self):
        if self.count >= 50:
            raise StopIteration
        self.count += 1
        try:
            return next(self.iter)
        except StopIteration:
            # Underlying loader exhausted: restart it and continue.
            self.iter = iter(self.dataloader)
            return next(self.iter)
class CustomNotImplementedErrorDataloader:
    """Like CustomInfDataloader, but ``len()`` deliberately raises.

    Used to exercise code paths that must cope with length-less loaders.
    """

    def __init__(self, dataloader):
        self.dataloader = dataloader
        self.iter = iter(dataloader)
        self.count = 0

    def __len__(self):
        """raise NotImplementedError"""
        raise NotImplementedError

    def __iter__(self):
        # A new epoch resets the 50-item budget.
        self.count = 0
        return self

    def __next__(self):
        if self.count >= 50:
            raise StopIteration
        self.count += 1
        try:
            return next(self.iter)
        except StopIteration:
            # Underlying loader exhausted: restart it and continue.
            self.iter = iter(self.dataloader)
            return next(self.iter)
| 23.5
| 45
| 0.581277
| 119
| 1,175
| 5.504202
| 0.210084
| 0.137405
| 0.073282
| 0.109924
| 0.777099
| 0.777099
| 0.777099
| 0.777099
| 0.777099
| 0.777099
| 0
| 0.012788
| 0.334468
| 1,175
| 49
| 46
| 23.979592
| 0.824808
| 0.04766
| 0
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.194444
| false
| 0
| 0
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbb61931b15daf7123c15864f807ddfb9c69bdcb
| 5,213
|
py
|
Python
|
player-statistics/tests/test_update_player_statistics.py
|
OpenMatchmaking/microservice-player-statistics
|
2f265df73a93483c5ac6ee8ef06a8611ecfba415
|
[
"BSD-3-Clause"
] | 4
|
2018-09-04T07:09:53.000Z
|
2021-08-13T10:04:20.000Z
|
player-statistics/tests/test_update_player_statistics.py
|
OpenMatchmaking/microservice-player-statistics
|
2f265df73a93483c5ac6ee8ef06a8611ecfba415
|
[
"BSD-3-Clause"
] | 1
|
2019-01-01T09:40:42.000Z
|
2019-01-01T09:40:42.000Z
|
player-statistics/tests/test_update_player_statistics.py
|
OpenMatchmaking/microservice-player-statistics
|
2f265df73a93483c5ac6ee8ef06a8611ecfba415
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import deepcopy
import pytest
from bson import ObjectId
from sage_utils.amqp.clients import RpcAmqpClient
from sage_utils.constants import VALIDATION_ERROR, NOT_FOUND_ERROR
from sage_utils.wrappers import Response
from app.statistics.documents import PlayerStatistic
from app.workers.update_player_statistics import UpdatePlayerStatisticsWorker
# AMQP wiring copied from the worker under test, so these tests publish to
# and consume from exactly the queue/exchanges the worker uses.
REQUEST_QUEUE = UpdatePlayerStatisticsWorker.QUEUE_NAME
REQUEST_EXCHANGE = UpdatePlayerStatisticsWorker.REQUEST_EXCHANGE_NAME
RESPONSE_EXCHANGE = UpdatePlayerStatisticsWorker.RESPONSE_EXCHANGE_NAME
# NOTE(review): presumably the worker's error payload for an unknown
# player_id — unused in the visible tests; confirm against the worker.
PLAYER_NOT_FOUND_ERROR = UpdatePlayerStatisticsWorker.PLAYER_NOT_FOUND_ERROR
@pytest.mark.asyncio
async def test_worker_returns_updated_player_statistics(sanic_server):
    """Happy path: sending a changed payload returns the updated record."""
    # Start from an empty collection so the count assertions below hold.
    await PlayerStatistic.collection.delete_many({})

    player_id = str(ObjectId())
    create_data = {
        'player_id': player_id,
        'total_games': 10,
        'wins': 5,
        'loses': 5,
        'rating': 2500
    }
    object = PlayerStatistic(**create_data)
    await object.commit()

    players_count = await PlayerStatistic.collection.count_documents({})
    assert players_count == 1

    # Same player, new figures for every mutable field.
    update_data = deepcopy(create_data)
    update_data.update({
        'total_games': 12,
        'wins': 6,
        'loses': 6,
        'rating': 2500
    })

    client = RpcAmqpClient(
        sanic_server.app,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE
    )
    response = await client.send(payload=update_data)

    # Successful responses carry a content field (not an error field).
    assert Response.EVENT_FIELD_NAME in response.keys()
    assert Response.CONTENT_FIELD_NAME in response.keys()

    # The returned document must echo every updated value.
    content = response[Response.CONTENT_FIELD_NAME]
    assert len(list(content.keys())) == 6
    assert set(content.keys()) == {'id', 'player_id', 'total_games', 'wins', 'loses', 'rating'}
    assert content['player_id'] == update_data['player_id']
    assert content['total_games'] == update_data['total_games']
    assert content['wins'] == update_data['wins']
    assert content['loses'] == update_data['loses']
    assert content['rating'] == update_data['rating']

    # Leave the collection clean for subsequent tests.
    await PlayerStatistic.collection.delete_many({})
@pytest.mark.asyncio
async def test_worker_returns_an_error_for_extra_fields_by_default(sanic_server):
    """Unknown fields in the payload must be rejected with a validation error."""
    # Start from an empty collection so the count assertions below hold.
    await PlayerStatistic.collection.delete_many({})

    player_id = str(ObjectId())
    create_data = {
        'player_id': player_id,
        'total_games': 10,
        'wins': 5,
        'loses': 5,
        'rating': 2500
    }
    object = PlayerStatistic(**create_data)
    await object.commit()

    players_count = await PlayerStatistic.collection.count_documents({})
    assert players_count == 1

    # Valid update plus two fields that are not part of the schema.
    update_data = deepcopy(create_data)
    update_data.update({
        'total_games': 12,
        'wins': 6,
        'loses': 6,
        'rating': 2500,
        'winrate': 50,
        'nickname': 'user'
    })

    client = RpcAmqpClient(
        sanic_server.app,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE
    )
    response = await client.send(payload=update_data)

    # Failed responses carry an error field instead of content.
    assert Response.EVENT_FIELD_NAME in response.keys()
    assert Response.ERROR_FIELD_NAME in response.keys()

    # One schema-level message per unknown field.
    error = response[Response.ERROR_FIELD_NAME]
    assert len(list(error.keys())) == 2
    assert set(error.keys()) == {'type', 'details'}
    assert error['type'] == VALIDATION_ERROR
    assert len(error['details']) == 1
    assert len(error['details']['_schema']) == 2
    assert set(error['details']['_schema']) == {
        'Unknown field name nickname.',
        'Unknown field name winrate.'
    }

    # Leave the collection clean for subsequent tests.
    await PlayerStatistic.collection.delete_many({})
@pytest.mark.asyncio
async def test_worker_returns_no_changed_player_statistics(sanic_server):
    """An update payload identical to the stored document must succeed:
    the worker echoes back the (unchanged) player statistics.
    """
    # Start from a clean collection so the count assertion below is reliable.
    await PlayerStatistic.collection.delete_many({})

    player_id = str(ObjectId())
    create_data = {
        'player_id': player_id,
        'total_games': 10,
        'wins': 5,
        'loses': 5,
        'rating': 2500
    }
    # Named `statistic` (not `object`) to avoid shadowing the builtin.
    statistic = PlayerStatistic(**create_data)
    await statistic.commit()
    players_count = await PlayerStatistic.collection.count_documents({})
    assert players_count == 1

    # Payload equals the stored document: a no-op update.
    update_data = deepcopy(create_data)
    client = RpcAmqpClient(
        sanic_server.app,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE
    )
    response = await client.send(payload=update_data)

    # Successful response: content mirrors the submitted (unchanged) values.
    assert Response.EVENT_FIELD_NAME in response.keys()
    assert Response.CONTENT_FIELD_NAME in response.keys()
    content = response[Response.CONTENT_FIELD_NAME]
    assert len(list(content.keys())) == 6
    assert set(content.keys()) == {'id', 'player_id', 'total_games', 'wins', 'loses', 'rating'}
    assert content['player_id'] == update_data['player_id']
    assert content['total_games'] == update_data['total_games']
    assert content['wins'] == update_data['wins']
    assert content['loses'] == update_data['loses']
    assert content['rating'] == update_data['rating']

    # Leave the collection clean for the next test.
    await PlayerStatistic.collection.delete_many({})
| 31.403614
| 95
| 0.689814
| 590
| 5,213
| 5.827119
| 0.161017
| 0.052356
| 0.078534
| 0.062827
| 0.755963
| 0.742874
| 0.742874
| 0.742874
| 0.730657
| 0.730657
| 0
| 0.01193
| 0.196048
| 5,213
| 165
| 96
| 31.593939
| 0.808399
| 0
| 0
| 0.736842
| 0
| 0
| 0.094955
| 0
| 0
| 0
| 0
| 0
| 0.218045
| 1
| 0
| false
| 0
| 0.06015
| 0
| 0.06015
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbcf41f91aa0e8cab2f46acb91b2e2ac3678818f
| 59,442
|
py
|
Python
|
mmtbx/regression/tst_angle.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tst_angle.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tst_angle.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from libtbx import easy_run
from libtbx.test_utils import approx_equal
import time
# angle = 90: chain X's waters vary only along y, chain Y's only along x,
# so the two chains' axes are perpendicular.
pdb_str_1 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 0.000 1.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 2.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 3.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 4.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 5.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 5.000 0.000 0.000 1.00 20.00 O
END
"""
# angle = 90: chain X along the x axis, chain Y along the z axis.
pdb_str_2 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 5.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 1.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 2.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 3.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 4.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 5.000 1.00 20.00 O
END
"""
# angle = 90: chain X along the x axis, chain Y along the y axis.
pdb_str_3 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 5.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 1.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 2.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 3.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 4.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 5.000 0.000 1.00 20.00 O
END
"""
# angle = 0: both chains lie on the x axis (chain Y is chain X shifted by
# +10 along x), so their axes are parallel.
pdb_str_4 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 5.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 11.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 12.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 13.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 14.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 15.000 0.000 0.000 1.00 20.00 O
END
"""
# angle = 0: both chains lie on the y axis, chain Y shifted by +10 along y.
pdb_str_5 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 0.000 1.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 2.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 3.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 4.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 5.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 11.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 12.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 13.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 14.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 15.000 0.000 1.00 20.00 O
END
"""
# angle = 0: both chains lie on the z axis, chain Y shifted by +10 along z.
pdb_str_6 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 0.000 0.000 1.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 2.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 3.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 4.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 5.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 11.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 12.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 13.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 14.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 15.000 1.00 20.00 O
END
"""
# Degenerate case: every atom of both chains sits at the origin, so no
# chain axis is defined.
pdb_str_7 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 0.000 0.000 0.000 1.00 20.00 O
END
"""
# angle = 45: chain X along the x axis, chain Y along the x=y diagonal.
pdb_str_8 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 5.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 1.000 1.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 2.000 2.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 3.000 3.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 4.000 4.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 5.000 5.000 0.000 1.00 20.00 O
END
"""
# Chain X along the x axis, chain Y along the negative x=y diagonal —
# 45 degrees between the (directionless) axes; presumably the test treats
# axis direction as irrelevant (TODO confirm against the angle code).
pdb_str_9 = """\
CRYST1 15.000 15.000 15.000 80.00 70.00 100.00 P 1
HETATM 1 O HOH X 1 1.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 2.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 3.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 4.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH X 1 5.000 0.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 -1.000 -1.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 -2.000 -2.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 -3.000 -3.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 -4.000 -4.000 0.000 1.00 20.00 O
HETATM 1 O HOH Y 1 -5.000 -5.000 0.000 1.00 20.00 O
END
"""
# real case
pdb_str_10 = """\
CRYST1 167.218 167.218 80.113 90.00 90.00 120.00 H 3
ATOM 1 P DA X 1 -22.252 -4.076 16.901 1.00 30.00 P
ATOM 2 OP1 DA X 1 -21.894 -2.937 17.777 1.00 30.00 O
ATOM 3 OP2 DA X 1 -22.778 -3.816 15.541 1.00 30.00 O
ATOM 4 O5' DA X 1 -20.992 -5.062 16.794 1.00 30.00 O
ATOM 5 C5' DA X 1 -20.219 -5.368 17.962 1.00 30.00 C
ATOM 6 C4' DA X 1 -18.753 -5.626 17.619 1.00 30.00 C
ATOM 7 O4' DA X 1 -18.660 -6.677 16.626 1.00 30.00 O
ATOM 8 C3' DA X 1 -18.001 -4.445 17.014 1.00 30.00 C
ATOM 9 O3' DA X 1 -16.607 -4.570 17.262 1.00 30.00 O
ATOM 10 C2' DA X 1 -18.289 -4.614 15.538 1.00 30.00 C
ATOM 11 C1' DA X 1 -18.218 -6.130 15.397 1.00 30.00 C
ATOM 12 N9 DA X 1 -19.076 -6.629 14.347 1.00 30.00 N
ATOM 13 C8 DA X 1 -20.403 -6.936 14.445 1.00 30.00 C
ATOM 14 N7 DA X 1 -20.928 -7.358 13.325 1.00 30.00 N
ATOM 15 C5 DA X 1 -19.871 -7.315 12.432 1.00 30.00 C
ATOM 16 C6 DA X 1 -19.774 -7.635 11.075 1.00 30.00 C
ATOM 17 N6 DA X 1 -20.805 -8.088 10.364 1.00 30.00 N
ATOM 18 N1 DA X 1 -18.572 -7.478 10.476 1.00 30.00 N
ATOM 19 C2 DA X 1 -17.541 -7.038 11.204 1.00 30.00 C
ATOM 20 N3 DA X 1 -17.514 -6.699 12.489 1.00 30.00 N
ATOM 21 C4 DA X 1 -18.724 -6.865 13.048 1.00 30.00 C
ATOM 22 P DA X 2 -15.623 -3.332 16.967 1.00 30.00 P
ATOM 23 OP1 DA X 2 -14.608 -3.313 18.042 1.00 30.00 O
ATOM 24 OP2 DA X 2 -16.467 -2.142 16.723 1.00 30.00 O
ATOM 25 O5' DA X 2 -14.901 -3.698 15.585 1.00 30.00 O
ATOM 26 C5' DA X 2 -13.821 -2.886 15.121 1.00 30.00 C
ATOM 27 C4' DA X 2 -13.517 -3.152 13.657 1.00 30.00 C
ATOM 28 O4' DA X 2 -14.632 -3.871 13.053 1.00 30.00 O
ATOM 29 C3' DA X 2 -13.327 -1.887 12.813 1.00 30.00 C
ATOM 30 O3' DA X 2 -12.303 -2.071 11.822 1.00 30.00 O
ATOM 31 C2' DA X 2 -14.693 -1.723 12.174 1.00 30.00 C
ATOM 32 C1' DA X 2 -15.061 -3.170 11.912 1.00 30.00 C
ATOM 33 N9 DA X 2 -16.493 -3.362 11.737 1.00 30.00 N
ATOM 34 C8 DA X 2 -17.485 -3.036 12.619 1.00 30.00 C
ATOM 35 N7 DA X 2 -18.691 -3.300 12.177 1.00 30.00 N
ATOM 36 C5 DA X 2 -18.472 -3.826 10.915 1.00 30.00 C
ATOM 37 C6 DA X 2 -19.343 -4.306 9.920 1.00 30.00 C
ATOM 38 N6 DA X 2 -20.675 -4.334 10.048 1.00 30.00 N
ATOM 39 N1 DA X 2 -18.792 -4.754 8.777 1.00 30.00 N
ATOM 40 C2 DA X 2 -17.463 -4.734 8.643 1.00 30.00 C
ATOM 41 N3 DA X 2 -16.545 -4.315 9.505 1.00 30.00 N
ATOM 42 C4 DA X 2 -17.121 -3.863 10.629 1.00 30.00 C
ATOM 43 P DG X 3 -11.350 -0.840 11.404 1.00 30.00 P
ATOM 44 OP1 DG X 3 -9.966 -1.340 11.327 1.00 30.00 O
ATOM 45 OP2 DG X 3 -11.663 0.296 12.295 1.00 30.00 O
ATOM 46 O5' DG X 3 -11.812 -0.444 9.923 1.00 30.00 O
ATOM 47 C5' DG X 3 -13.083 0.157 9.706 1.00 30.00 C
ATOM 48 C4' DG X 3 -13.884 -0.639 8.690 1.00 30.00 C
ATOM 49 O4' DG X 3 -15.169 -0.989 9.235 1.00 30.00 O
ATOM 50 C3' DG X 3 -14.199 0.103 7.398 1.00 30.00 C
ATOM 51 O3' DG X 3 -13.216 -0.199 6.401 1.00 30.00 O
ATOM 52 C2' DG X 3 -15.607 -0.386 7.002 1.00 30.00 C
ATOM 53 C1' DG X 3 -16.033 -1.286 8.169 1.00 30.00 C
ATOM 54 N9 DG X 3 -17.411 -1.063 8.603 1.00 30.00 N
ATOM 55 C8 DG X 3 -17.825 -0.414 9.740 1.00 30.00 C
ATOM 56 N7 DG X 3 -19.121 -0.359 9.863 1.00 30.00 N
ATOM 57 C5 DG X 3 -19.598 -1.012 8.734 1.00 30.00 C
ATOM 58 C6 DG X 3 -20.927 -1.261 8.328 1.00 30.00 C
ATOM 59 O6 DG X 3 -21.966 -0.934 8.909 1.00 30.00 O
ATOM 60 N1 DG X 3 -20.980 -1.967 7.118 1.00 30.00 N
ATOM 61 C2 DG X 3 -19.869 -2.356 6.401 1.00 30.00 C
ATOM 62 N2 DG X 3 -20.096 -3.006 5.259 1.00 30.00 N
ATOM 63 N3 DG X 3 -18.618 -2.126 6.780 1.00 30.00 N
ATOM 64 C4 DG X 3 -18.559 -1.450 7.949 1.00 30.00 C
ATOM 65 P DC X 4 -13.188 -1.621 5.640 1.00 30.00 P
ATOM 66 OP1 DC X 4 -11.804 -1.757 5.138 1.00 30.00 O
ATOM 67 OP2 DC X 4 -13.754 -2.709 6.473 1.00 30.00 O
ATOM 68 O5' DC X 4 -14.144 -1.403 4.375 1.00 30.00 O
ATOM 69 C5' DC X 4 -13.955 -2.190 3.197 1.00 30.00 C
ATOM 70 C4' DC X 4 -14.524 -1.482 1.984 1.00 30.00 C
ATOM 71 O4' DC X 4 -15.947 -1.775 1.887 1.00 30.00 O
ATOM 72 C3' DC X 4 -14.427 0.027 2.048 1.00 30.00 C
ATOM 73 O3' DC X 4 -13.164 0.505 1.487 1.00 30.00 O
ATOM 74 C2' DC X 4 -15.632 0.468 1.213 1.00 30.00 C
ATOM 75 C1' DC X 4 -16.668 -0.622 1.488 1.00 30.00 C
ATOM 76 N1 DC X 4 -17.683 -0.282 2.571 1.00 30.00 N
ATOM 77 C2 DC X 4 -19.052 -0.141 2.251 1.00 30.00 C
ATOM 78 O2 DC X 4 -19.417 -0.277 1.076 1.00 30.00 O
ATOM 79 N3 DC X 4 -19.938 0.151 3.250 1.00 30.00 N
ATOM 80 C4 DC X 4 -19.490 0.294 4.507 1.00 30.00 C
ATOM 81 N4 DC X 4 -20.370 0.575 5.469 1.00 30.00 N
ATOM 82 C5 DC X 4 -18.116 0.152 4.836 1.00 30.00 C
ATOM 83 C6 DC X 4 -17.262 -0.140 3.857 1.00 30.00 C
ATOM 84 P DC X 5 -11.972 1.062 2.428 1.00 30.00 P
ATOM 85 OP1 DC X 5 -10.698 0.780 1.730 1.00 30.00 O
ATOM 86 OP2 DC X 5 -12.176 0.591 3.811 1.00 30.00 O
ATOM 87 O5' DC X 5 -12.164 2.653 2.443 1.00 30.00 O
ATOM 88 C5' DC X 5 -12.886 3.290 3.505 1.00 30.00 C
ATOM 89 C4' DC X 5 -14.372 3.021 3.359 1.00 30.00 C
ATOM 90 O4' DC X 5 -15.004 2.870 4.653 1.00 30.00 O
ATOM 91 C3' DC X 5 -15.202 4.073 2.597 1.00 30.00 C
ATOM 92 O3' DC X 5 -16.015 3.409 1.614 1.00 30.00 O
ATOM 93 C2' DC X 5 -16.078 4.655 3.707 1.00 30.00 C
ATOM 94 C1' DC X 5 -16.290 3.402 4.527 1.00 30.00 C
ATOM 95 N1 DC X 5 -16.869 3.620 5.862 1.00 30.00 N
ATOM 96 C2 DC X 5 -18.253 3.538 6.020 1.00 30.00 C
ATOM 97 O2 DC X 5 -18.948 3.302 5.034 1.00 30.00 O
ATOM 98 N3 DC X 5 -18.791 3.724 7.249 1.00 30.00 N
ATOM 99 C4 DC X 5 -17.988 3.980 8.286 1.00 30.00 C
ATOM 100 N4 DC X 5 -18.551 4.159 9.485 1.00 30.00 N
ATOM 101 C5 DC X 5 -16.569 4.060 8.141 1.00 30.00 C
ATOM 102 C6 DC X 5 -16.058 3.872 6.922 1.00 30.00 C
ATOM 103 P DG X 6 -15.767 3.603 0.033 1.00 30.00 P
ATOM 104 OP1 DG X 6 -14.509 2.906 -0.328 1.00 30.00 O
ATOM 105 OP2 DG X 6 -15.921 5.044 -0.249 1.00 30.00 O
ATOM 106 O5' DG X 6 -17.002 2.829 -0.647 1.00 30.00 O
ATOM 107 C5' DG X 6 -17.284 2.994 -2.042 1.00 30.00 C
ATOM 108 C4' DG X 6 -18.787 2.987 -2.306 1.00 30.00 C
ATOM 109 O4' DG X 6 -19.502 2.825 -1.052 1.00 30.00 O
ATOM 110 C3' DG X 6 -19.337 4.270 -2.919 1.00 30.00 C
ATOM 111 O3' DG X 6 -20.489 3.998 -3.714 1.00 30.00 O
ATOM 112 C2' DG X 6 -19.704 5.076 -1.690 1.00 30.00 C
ATOM 113 C1' DG X 6 -20.237 4.002 -0.758 1.00 30.00 C
ATOM 114 N9 DG X 6 -20.026 4.326 0.643 1.00 30.00 N
ATOM 115 C8 DG X 6 -18.900 4.058 1.374 1.00 30.00 C
ATOM 116 N7 DG X 6 -18.972 4.464 2.602 1.00 30.00 N
ATOM 117 C5 DG X 6 -20.227 5.042 2.703 1.00 30.00 C
ATOM 118 C6 DG X 6 -20.854 5.652 3.812 1.00 30.00 C
ATOM 119 O6 DG X 6 -20.404 5.795 4.956 1.00 30.00 O
ATOM 120 N1 DG X 6 -22.130 6.120 3.494 1.00 30.00 N
ATOM 121 C2 DG X 6 -22.721 5.994 2.251 1.00 30.00 C
ATOM 122 N2 DG X 6 -23.953 6.491 2.121 1.00 30.00 N
ATOM 123 N3 DG X 6 -22.140 5.425 1.205 1.00 30.00 N
ATOM 124 C4 DG X 6 -20.896 4.972 1.503 1.00 30.00 C
ATOM 125 P DA X 7 -21.090 5.134 -4.683 1.00 30.00 P
ATOM 126 OP1 DA X 7 -22.135 4.497 -5.514 1.00 30.00 O
ATOM 127 OP2 DA X 7 -19.954 5.813 -5.345 1.00 30.00 O
ATOM 128 O5' DA X 7 -21.796 6.179 -3.688 1.00 30.00 O
ATOM 129 C5' DA X 7 -23.114 5.920 -3.197 1.00 30.00 C
ATOM 130 C4' DA X 7 -23.593 7.012 -2.242 1.00 30.00 C
ATOM 131 O4' DA X 7 -22.746 7.060 -1.062 1.00 30.00 O
ATOM 132 C3' DA X 7 -23.594 8.437 -2.795 1.00 30.00 C
ATOM 133 O3' DA X 7 -24.673 9.164 -2.201 1.00 30.00 O
ATOM 134 C2' DA X 7 -22.251 8.963 -2.291 1.00 30.00 C
ATOM 135 C1' DA X 7 -22.273 8.380 -0.892 1.00 30.00 C
ATOM 136 N9 DA X 7 -20.981 8.320 -0.219 1.00 30.00 N
ATOM 137 C8 DA X 7 -19.767 8.039 -0.769 1.00 30.00 C
ATOM 138 N7 DA X 7 -18.782 8.022 0.099 1.00 30.00 N
ATOM 139 C5 DA X 7 -19.397 8.323 1.299 1.00 30.00 C
ATOM 140 C6 DA X 7 -18.911 8.472 2.612 1.00 30.00 C
ATOM 141 N6 DA X 7 -17.636 8.325 2.966 1.00 30.00 N
ATOM 142 N1 DA X 7 -19.793 8.770 3.558 1.00 30.00 N
ATOM 143 C2 DA X 7 -21.076 8.919 3.236 1.00 30.00 C
ATOM 144 N3 DA X 7 -21.653 8.810 2.050 1.00 30.00 N
ATOM 145 C4 DA X 7 -20.752 8.504 1.116 1.00 30.00 C
ATOM 146 P DG X 8 -26.131 9.188 -2.879 1.00 30.00 P
ATOM 147 OP1 DG X 8 -26.599 7.790 -3.027 1.00 30.00 O
ATOM 148 OP2 DG X 8 -26.051 10.074 -4.061 1.00 30.00 O
ATOM 149 O5' DG X 8 -27.041 9.918 -1.784 1.00 30.00 O
ATOM 150 C5' DG X 8 -27.592 9.169 -0.717 1.00 30.00 C
ATOM 151 C4' DG X 8 -27.340 9.849 0.620 1.00 30.00 C
ATOM 152 O4' DG X 8 -25.917 9.834 0.931 1.00 30.00 O
ATOM 153 C3' DG X 8 -27.778 11.325 0.708 1.00 30.00 C
ATOM 154 O3' DG X 8 -28.595 11.521 1.869 1.00 30.00 O
ATOM 155 C2' DG X 8 -26.448 12.076 0.826 1.00 30.00 C
ATOM 156 C1' DG X 8 -25.614 11.049 1.562 1.00 30.00 C
ATOM 157 N9 DG X 8 -24.168 11.305 1.539 1.00 30.00 N
ATOM 158 C8 DG X 8 -23.311 11.270 0.464 1.00 30.00 C
ATOM 159 N7 DG X 8 -22.073 11.564 0.779 1.00 30.00 N
ATOM 160 C5 DG X 8 -22.122 11.812 2.146 1.00 30.00 C
ATOM 161 C6 DG X 8 -21.095 12.173 3.054 1.00 30.00 C
ATOM 162 O6 DG X 8 -19.893 12.353 2.819 1.00 30.00 O
ATOM 163 N1 DG X 8 -21.587 12.326 4.354 1.00 30.00 N
ATOM 164 C2 DG X 8 -22.906 12.135 4.715 1.00 30.00 C
ATOM 165 N2 DG X 8 -23.224 12.330 6.003 1.00 30.00 N
ATOM 166 N3 DG X 8 -23.860 11.803 3.873 1.00 30.00 N
ATOM 167 C4 DG X 8 -23.401 11.650 2.619 1.00 30.00 C
ATOM 168 P DT X 9 -29.453 12.870 2.063 1.00 30.00 P
ATOM 169 OP1 DT X 9 -30.263 12.712 3.292 1.00 30.00 O
ATOM 170 OP2 DT X 9 -30.107 13.189 0.774 1.00 30.00 O
ATOM 171 O5' DT X 9 -28.352 13.995 2.339 1.00 30.00 O
ATOM 172 C5' DT X 9 -28.636 15.075 3.221 1.00 30.00 C
ATOM 173 C4' DT X 9 -27.778 14.972 4.468 1.00 30.00 C
ATOM 174 O4' DT X 9 -26.621 14.160 4.170 1.00 30.00 O
ATOM 175 C3' DT X 9 -27.212 16.288 4.975 1.00 30.00 C
ATOM 176 O3' DT X 9 -28.112 16.899 5.889 1.00 30.00 O
ATOM 177 C2' DT X 9 -25.938 15.854 5.686 1.00 30.00 C
ATOM 178 C1' DT X 9 -25.518 14.599 4.938 1.00 30.00 C
ATOM 179 N1 DT X 9 -24.342 14.789 4.043 1.00 30.00 N
ATOM 180 C2 DT X 9 -23.128 15.119 4.594 1.00 30.00 C
ATOM 181 O2 DT X 9 -22.951 15.286 5.789 1.00 30.00 O
ATOM 182 N3 DT X 9 -22.118 15.240 3.696 1.00 30.00 N
ATOM 183 C4 DT X 9 -22.174 15.085 2.332 1.00 30.00 C
ATOM 184 O4 DT X 9 -21.182 15.217 1.621 1.00 30.00 O
ATOM 185 C5 DT X 9 -23.475 14.739 1.809 1.00 30.00 C
ATOM 186 C7 DT X 9 -23.672 14.538 0.336 1.00 30.00 C
ATOM 187 C6 DT X 9 -24.486 14.612 2.682 1.00 30.00 C
ATOM 188 P DA X 10 -27.979 18.466 6.227 1.00 30.00 P
ATOM 189 OP1 DA X 10 -29.022 18.792 7.222 1.00 30.00 O
ATOM 190 OP2 DA X 10 -27.904 19.205 4.949 1.00 30.00 O
ATOM 191 O5' DA X 10 -26.558 18.606 6.940 1.00 30.00 O
ATOM 192 C5' DA X 10 -26.339 18.016 8.207 1.00 30.00 C
ATOM 193 C4' DA X 10 -24.867 18.045 8.546 1.00 30.00 C
ATOM 194 O4' DA X 10 -24.094 17.586 7.400 1.00 30.00 O
ATOM 195 C3' DA X 10 -24.330 19.435 8.879 1.00 30.00 C
ATOM 196 O3' DA X 10 -23.425 19.350 9.970 1.00 30.00 O
ATOM 197 C2' DA X 10 -23.625 19.848 7.590 1.00 30.00 C
ATOM 198 C1' DA X 10 -23.069 18.514 7.144 1.00 30.00 C
ATOM 199 N9 DA X 10 -22.696 18.462 5.734 1.00 30.00 N
ATOM 200 C8 DA X 10 -23.502 18.207 4.659 1.00 30.00 C
ATOM 201 N7 DA X 10 -22.865 18.222 3.507 1.00 30.00 N
ATOM 202 C5 DA X 10 -21.556 18.517 3.856 1.00 30.00 C
ATOM 203 C6 DA X 10 -20.370 18.684 3.100 1.00 30.00 C
ATOM 204 N6 DA X 10 -20.307 18.581 1.756 1.00 30.00 N
ATOM 205 N1 DA X 10 -19.243 18.974 3.781 1.00 30.00 N
ATOM 206 C2 DA X 10 -19.299 19.093 5.112 1.00 30.00 C
ATOM 207 N3 DA X 10 -20.339 18.955 5.920 1.00 30.00 N
ATOM 208 C4 DA X 10 -21.444 18.665 5.226 1.00 30.00 C
ATOM 209 P DC X 11 -22.654 20.665 10.476 1.00 30.00 P
ATOM 210 OP1 DC X 11 -22.184 20.417 11.858 1.00 30.00 O
ATOM 211 OP2 DC X 11 -23.515 21.834 10.194 1.00 30.00 O
ATOM 212 O5' DC X 11 -21.390 20.753 9.512 1.00 30.00 O
ATOM 213 C5' DC X 11 -20.143 20.261 9.943 1.00 30.00 C
ATOM 214 C4' DC X 11 -19.031 21.189 9.499 1.00 30.00 C
ATOM 215 O4' DC X 11 -18.892 21.121 8.058 1.00 30.00 O
ATOM 216 C3' DC X 11 -19.251 22.661 9.821 1.00 30.00 C
ATOM 217 O3' DC X 11 -18.008 23.272 10.064 1.00 30.00 O
ATOM 218 C2' DC X 11 -19.884 23.195 8.540 1.00 30.00 C
ATOM 219 C1' DC X 11 -19.138 22.393 7.490 1.00 30.00 C
ATOM 220 N1 DC X 11 -19.907 22.199 6.246 1.00 30.00 N
ATOM 221 C2 DC X 11 -19.240 22.236 5.028 1.00 30.00 C
ATOM 222 O2 DC X 11 -18.023 22.420 5.029 1.00 30.00 O
ATOM 223 N3 DC X 11 -19.945 22.057 3.885 1.00 30.00 N
ATOM 224 C4 DC X 11 -21.268 21.855 3.945 1.00 30.00 C
ATOM 225 N4 DC X 11 -21.927 21.686 2.796 1.00 30.00 N
ATOM 226 C5 DC X 11 -21.971 21.825 5.187 1.00 30.00 C
ATOM 227 C6 DC X 11 -21.256 22.000 6.303 1.00 30.00 C
ATOM 228 P DG X 12 -17.936 24.675 10.835 1.00 30.00 P
ATOM 229 OP1 DG X 12 -17.261 24.429 12.125 1.00 30.00 O
ATOM 230 OP2 DG X 12 -19.279 25.294 10.802 1.00 30.00 O
ATOM 231 O5' DG X 12 -16.959 25.539 9.927 1.00 30.00 O
ATOM 232 C5' DG X 12 -15.771 24.949 9.420 1.00 30.00 C
ATOM 233 C4' DG X 12 -15.171 25.827 8.349 1.00 30.00 C
ATOM 234 O4' DG X 12 -15.672 25.412 7.049 1.00 30.00 O
ATOM 235 C3' DG X 12 -15.529 27.307 8.476 1.00 30.00 C
ATOM 236 O3' DG X 12 -14.457 28.117 7.995 1.00 30.00 O
ATOM 237 C2' DG X 12 -16.751 27.412 7.580 1.00 30.00 C
ATOM 238 C1' DG X 12 -16.335 26.497 6.442 1.00 30.00 C
ATOM 239 N9 DG X 12 -17.460 26.000 5.662 1.00 30.00 N
ATOM 240 C8 DG X 12 -18.685 25.594 6.145 1.00 30.00 C
ATOM 241 N7 DG X 12 -19.507 25.210 5.211 1.00 30.00 N
ATOM 242 C5 DG X 12 -18.782 25.368 4.029 1.00 30.00 C
ATOM 243 C6 DG X 12 -19.159 25.107 2.684 1.00 30.00 C
ATOM 244 O6 DG X 12 -20.241 24.661 2.265 1.00 30.00 O
ATOM 245 N1 DG X 12 -18.128 25.410 1.782 1.00 30.00 N
ATOM 246 C2 DG X 12 -16.886 25.901 2.151 1.00 30.00 C
ATOM 247 N2 DG X 12 -16.019 26.133 1.162 1.00 30.00 N
ATOM 248 N3 DG X 12 -16.521 26.144 3.405 1.00 30.00 N
ATOM 249 C4 DG X 12 -17.516 25.850 4.291 1.00 30.00 C
ATOM 250 P DG X 13 -16.206 33.094 5.526 1.00 30.00 P
ATOM 251 OP1 DG X 13 -15.400 33.893 4.576 1.00 30.00 O
ATOM 252 OP2 DG X 13 -17.594 32.760 5.161 1.00 30.00 O
ATOM 253 O5' DG X 13 -15.466 31.703 5.852 1.00 30.00 O
ATOM 254 C5' DG X 13 -16.223 30.521 6.196 1.00 30.00 C
ATOM 255 C4' DG X 13 -16.386 29.628 5.001 1.00 30.00 C
ATOM 256 O4' DG X 13 -17.755 29.191 4.901 1.00 30.00 O
ATOM 257 C3' DG X 13 -16.062 30.300 3.680 1.00 30.00 C
ATOM 258 O3' DG X 13 -14.854 29.789 3.180 1.00 30.00 O
ATOM 259 C2' DG X 13 -17.255 29.974 2.774 1.00 30.00 C
ATOM 260 C1' DG X 13 -17.997 28.904 3.559 1.00 30.00 C
ATOM 261 N9 DG X 13 -19.432 28.876 3.319 1.00 30.00 N
ATOM 262 C8 DG X 13 -20.434 28.811 4.257 1.00 30.00 C
ATOM 263 N7 DG X 13 -21.626 28.799 3.732 1.00 30.00 N
ATOM 264 C5 DG X 13 -21.395 28.857 2.360 1.00 30.00 C
ATOM 265 C6 DG X 13 -22.311 28.873 1.282 1.00 30.00 C
ATOM 266 O6 DG X 13 -23.541 28.825 1.350 1.00 30.00 O
ATOM 267 N1 DG X 13 -21.662 28.947 0.029 1.00 30.00 N
ATOM 268 C2 DG X 13 -20.285 28.975 -0.111 1.00 30.00 C
ATOM 269 N2 DG X 13 -19.804 29.036 -1.346 1.00 30.00 N
ATOM 270 N3 DG X 13 -19.434 28.958 0.897 1.00 30.00 N
ATOM 271 C4 DG X 13 -20.053 28.901 2.095 1.00 30.00 C
ATOM 272 P DG X 14 -13.681 30.798 2.758 1.00 30.00 P
ATOM 273 OP1 DG X 14 -12.397 30.073 2.863 1.00 30.00 O
ATOM 274 OP2 DG X 14 -13.858 32.067 3.496 1.00 30.00 O
ATOM 275 O5' DG X 14 -13.987 31.071 1.224 1.00 30.00 O
ATOM 276 C5' DG X 14 -14.487 30.022 0.406 1.00 30.00 C
ATOM 277 C4' DG X 14 -15.244 30.611 -0.756 1.00 30.00 C
ATOM 278 O4' DG X 14 -16.673 30.563 -0.486 1.00 30.00 O
ATOM 279 C3' DG X 14 -14.937 32.095 -1.004 1.00 30.00 C
ATOM 280 O3' DG X 14 -14.983 32.366 -2.389 1.00 30.00 O
ATOM 281 C2' DG X 14 -16.095 32.773 -0.297 1.00 30.00 C
ATOM 282 C1' DG X 14 -17.175 31.840 -0.770 1.00 30.00 C
ATOM 283 N9 DG X 14 -18.481 32.041 -0.171 1.00 30.00 N
ATOM 284 C8 DG X 14 -18.785 32.361 1.127 1.00 30.00 C
ATOM 285 N7 DG X 14 -20.066 32.500 1.333 1.00 30.00 N
ATOM 286 C5 DG X 14 -20.632 32.270 0.085 1.00 30.00 C
ATOM 287 C6 DG X 14 -21.982 32.284 -0.322 1.00 30.00 C
ATOM 288 O6 DG X 14 -22.979 32.500 0.378 1.00 30.00 O
ATOM 289 N1 DG X 14 -22.116 32.012 -1.695 1.00 30.00 N
ATOM 290 C2 DG X 14 -21.054 31.726 -2.533 1.00 30.00 C
ATOM 291 N2 DG X 14 -21.330 31.471 -3.817 1.00 30.00 N
ATOM 292 N3 DG X 14 -19.799 31.715 -2.145 1.00 30.00 N
ATOM 293 C4 DG X 14 -19.665 31.993 -0.839 1.00 30.00 C
ATOM 294 P DC X 15 -14.354 33.731 -2.949 1.00 30.00 P
ATOM 295 OP1 DC X 15 -13.670 33.436 -4.229 1.00 30.00 O
ATOM 296 OP2 DC X 15 -13.603 34.337 -1.832 1.00 30.00 O
ATOM 297 O5' DC X 15 -15.615 34.667 -3.217 1.00 30.00 O
ATOM 298 C5' DC X 15 -16.025 34.907 -4.538 1.00 30.00 C
ATOM 299 C4' DC X 15 -16.843 33.750 -5.063 1.00 30.00 C
ATOM 300 O4' DC X 15 -17.752 33.314 -4.026 1.00 30.00 O
ATOM 301 C3' DC X 15 -17.757 34.132 -6.200 1.00 30.00 C
ATOM 302 O3' DC X 15 -18.273 32.978 -6.799 1.00 30.00 O
ATOM 303 C2' DC X 15 -18.840 34.855 -5.434 1.00 30.00 C
ATOM 304 C1' DC X 15 -19.024 33.879 -4.281 1.00 30.00 C
ATOM 305 N1 DC X 15 -19.544 34.482 -3.029 1.00 30.00 N
ATOM 306 C2 DC X 15 -20.897 34.767 -2.942 1.00 30.00 C
ATOM 307 O2 DC X 15 -21.607 34.536 -3.918 1.00 30.00 O
ATOM 308 N3 DC X 15 -21.394 35.284 -1.796 1.00 30.00 N
ATOM 309 C4 DC X 15 -20.580 35.527 -0.771 1.00 30.00 C
ATOM 310 N4 DC X 15 -21.113 36.054 0.335 1.00 30.00 N
ATOM 311 C5 DC X 15 -19.181 35.240 -0.834 1.00 30.00 C
ATOM 312 C6 DC X 15 -18.708 34.722 -1.976 1.00 30.00 C
ATOM 313 P DT X 16 -18.914 33.076 -8.263 1.00 30.00 P
ATOM 314 OP1 DT X 16 -19.130 31.701 -8.759 1.00 30.00 O
ATOM 315 OP2 DT X 16 -18.062 34.030 -9.009 1.00 30.00 O
ATOM 316 O5' DT X 16 -20.353 33.740 -8.033 1.00 30.00 O
ATOM 317 C5' DT X 16 -21.267 33.776 -9.119 1.00 30.00 C
ATOM 318 C4' DT X 16 -22.576 34.443 -8.741 1.00 30.00 C
ATOM 319 O4' DT X 16 -22.599 34.739 -7.336 1.00 30.00 O
ATOM 320 C3' DT X 16 -22.826 35.768 -9.445 1.00 30.00 C
ATOM 321 O3' DT X 16 -23.731 35.572 -10.512 1.00 30.00 O
ATOM 322 C2' DT X 16 -23.413 36.688 -8.361 1.00 30.00 C
ATOM 323 C1' DT X 16 -23.483 35.806 -7.118 1.00 30.00 C
ATOM 324 N1 DT X 16 -23.047 36.499 -5.900 1.00 30.00 N
ATOM 325 C2 DT X 16 -23.945 36.715 -4.891 1.00 30.00 C
ATOM 326 O2 DT X 16 -25.112 36.370 -4.948 1.00 30.00 O
ATOM 327 N3 DT X 16 -23.428 37.363 -3.807 1.00 30.00 N
ATOM 328 C4 DT X 16 -22.137 37.812 -3.636 1.00 30.00 C
ATOM 329 O4 DT X 16 -21.770 38.378 -2.617 1.00 30.00 O
ATOM 330 C5 DT X 16 -21.245 37.554 -4.728 1.00 30.00 C
ATOM 331 C7 DT X 16 -19.817 38.003 -4.647 1.00 30.00 C
ATOM 332 C6 DT X 16 -21.735 36.918 -5.800 1.00 30.00 C
ATOM 333 P DG X 17 -23.977 36.742 -11.580 1.00 30.00 P
ATOM 334 OP1 DG X 17 -24.567 36.126 -12.789 1.00 30.00 O
ATOM 335 OP2 DG X 17 -22.716 37.507 -11.691 1.00 30.00 O
ATOM 336 O5' DG X 17 -25.071 37.671 -10.874 1.00 30.00 O
ATOM 337 C5' DG X 17 -26.282 37.097 -10.397 1.00 30.00 C
ATOM 338 C4' DG X 17 -26.954 38.010 -9.389 1.00 30.00 C
ATOM 339 O4' DG X 17 -26.206 38.019 -8.165 1.00 30.00 O
ATOM 340 C3' DG X 17 -27.023 39.474 -9.794 1.00 30.00 C
ATOM 341 O3' DG X 17 -28.224 39.731 -10.496 1.00 30.00 O
ATOM 342 C2' DG X 17 -26.993 40.226 -8.455 1.00 30.00 C
ATOM 343 C1' DG X 17 -26.607 39.150 -7.437 1.00 30.00 C
ATOM 344 N9 DG X 17 -25.537 39.563 -6.539 1.00 30.00 N
ATOM 345 C8 DG X 17 -24.187 39.599 -6.796 1.00 30.00 C
ATOM 346 N7 DG X 17 -23.482 40.032 -5.786 1.00 30.00 N
ATOM 347 C5 DG X 17 -24.429 40.307 -4.806 1.00 30.00 C
ATOM 348 C6 DG X 17 -24.268 40.805 -3.491 1.00 30.00 C
ATOM 349 O6 DG X 17 -23.216 41.110 -2.909 1.00 30.00 O
ATOM 350 N1 DG X 17 -25.497 40.939 -2.838 1.00 30.00 N
ATOM 351 C2 DG X 17 -26.722 40.621 -3.394 1.00 30.00 C
ATOM 352 N2 DG X 17 -27.808 40.806 -2.626 1.00 30.00 N
ATOM 353 N3 DG X 17 -26.874 40.157 -4.619 1.00 30.00 N
ATOM 354 C4 DG X 17 -25.694 40.026 -5.262 1.00 30.00 C
ATOM 355 P DC X 18 -28.399 41.116 -11.290 1.00 30.00 P
ATOM 356 OP1 DC X 18 -29.475 40.937 -12.291 1.00 30.00 O
ATOM 357 OP2 DC X 18 -27.045 41.534 -11.717 1.00 30.00 O
ATOM 358 O5' DC X 18 -28.889 42.148 -10.167 1.00 30.00 O
ATOM 359 C5' DC X 18 -30.190 42.028 -9.598 1.00 30.00 C
ATOM 360 C4' DC X 18 -30.357 42.996 -8.440 1.00 30.00 C
ATOM 361 O4' DC X 18 -29.522 42.567 -7.341 1.00 30.00 O
ATOM 362 C3' DC X 18 -29.930 44.423 -8.738 1.00 30.00 C
ATOM 363 O3' DC X 18 -31.031 45.159 -9.256 1.00 30.00 O
ATOM 364 C2' DC X 18 -29.505 44.947 -7.367 1.00 30.00 C
ATOM 365 C1' DC X 18 -29.004 43.687 -6.651 1.00 30.00 C
ATOM 366 N1 DC X 18 -27.504 43.568 -6.588 1.00 30.00 N
ATOM 367 C2 DC X 18 -26.875 43.416 -5.345 1.00 30.00 C
ATOM 368 O2 DC X 18 -27.567 43.383 -4.323 1.00 30.00 O
ATOM 369 N3 DC X 18 -25.521 43.319 -5.296 1.00 30.00 N
ATOM 370 C4 DC X 18 -24.813 43.348 -6.434 1.00 30.00 C
ATOM 371 N4 DC X 18 -23.484 43.238 -6.349 1.00 30.00 N
ATOM 372 C5 DC X 18 -25.437 43.503 -7.708 1.00 30.00 C
ATOM 373 C6 DC X 18 -26.769 43.608 -7.737 1.00 30.00 C
TER
ATOM 374 O5' DT Y 5 -29.913 -51.862 23.113 1.00 30.00 O
ATOM 375 C5' DT Y 5 -30.530 -50.710 22.551 1.00 30.00 C
ATOM 376 C4' DT Y 5 -29.803 -49.443 22.968 1.00 30.00 C
ATOM 377 O4' DT Y 5 -28.370 -49.645 22.882 1.00 30.00 O
ATOM 378 C3' DT Y 5 -30.110 -48.213 22.114 1.00 30.00 C
ATOM 379 O3' DT Y 5 -30.260 -47.085 22.945 1.00 30.00 O
ATOM 380 C2' DT Y 5 -28.873 -48.085 21.232 1.00 30.00 C
ATOM 381 C1' DT Y 5 -27.790 -48.572 22.178 1.00 30.00 C
ATOM 382 N1 DT Y 5 -26.569 -49.068 21.481 1.00 30.00 N
ATOM 383 C2 DT Y 5 -25.717 -49.924 22.135 1.00 30.00 C
ATOM 384 O2 DT Y 5 -25.895 -50.302 23.272 1.00 30.00 O
ATOM 385 N3 DT Y 5 -24.633 -50.322 21.404 1.00 30.00 N
ATOM 386 C4 DT Y 5 -24.317 -49.957 20.112 1.00 30.00 C
ATOM 387 O4 DT Y 5 -23.314 -50.370 19.539 1.00 30.00 O
ATOM 388 C5 DT Y 5 -25.252 -49.056 19.482 1.00 30.00 C
ATOM 389 C7 DT Y 5 -25.019 -48.589 18.077 1.00 30.00 C
ATOM 390 C6 DT Y 5 -26.320 -48.660 20.189 1.00 30.00 C
ATOM 391 P DC Y 6 -31.679 -46.769 23.625 1.00 30.00 P
ATOM 392 OP1 DC Y 6 -32.607 -47.877 23.306 1.00 30.00 O
ATOM 393 OP2 DC Y 6 -32.034 -45.378 23.264 1.00 30.00 O
ATOM 394 O5' DC Y 6 -31.370 -46.798 25.191 1.00 30.00 O
ATOM 395 C5' DC Y 6 -30.887 -45.636 25.827 1.00 30.00 C
ATOM 396 C4' DC Y 6 -29.422 -45.779 26.179 1.00 30.00 C
ATOM 397 O4' DC Y 6 -28.725 -46.486 25.128 1.00 30.00 O
ATOM 398 C3' DC Y 6 -28.685 -44.470 26.316 1.00 30.00 C
ATOM 399 O3' DC Y 6 -28.894 -43.952 27.620 1.00 30.00 O
ATOM 400 C2' DC Y 6 -27.237 -44.898 26.083 1.00 30.00 C
ATOM 401 C1' DC Y 6 -27.395 -46.012 25.038 1.00 30.00 C
ATOM 402 N1 DC Y 6 -27.139 -45.573 23.630 1.00 30.00 N
ATOM 403 C2 DC Y 6 -25.916 -45.870 23.029 1.00 30.00 C
ATOM 404 O2 DC Y 6 -25.063 -46.492 23.673 1.00 30.00 O
ATOM 405 N3 DC Y 6 -25.702 -45.478 21.752 1.00 30.00 N
ATOM 406 C4 DC Y 6 -26.634 -44.812 21.088 1.00 30.00 C
ATOM 407 N4 DC Y 6 -26.355 -44.444 19.833 1.00 30.00 N
ATOM 408 C5 DC Y 6 -27.894 -44.497 21.677 1.00 30.00 C
ATOM 409 C6 DC Y 6 -28.105 -44.901 22.936 1.00 30.00 C
ATOM 410 P DT Y 7 -28.533 -42.425 27.954 1.00 30.00 P
ATOM 411 OP1 DT Y 7 -29.347 -41.996 29.115 1.00 30.00 O
ATOM 412 OP2 DT Y 7 -28.584 -41.659 26.689 1.00 30.00 O
ATOM 413 O5' DT Y 7 -27.012 -42.511 28.411 1.00 30.00 O
ATOM 414 C5' DT Y 7 -26.349 -41.371 28.891 1.00 30.00 C
ATOM 415 C4' DT Y 7 -24.859 -41.609 28.869 1.00 30.00 C
ATOM 416 O4' DT Y 7 -24.570 -42.685 27.949 1.00 30.00 O
ATOM 417 C3' DT Y 7 -24.042 -40.423 28.400 1.00 30.00 C
ATOM 418 O3' DT Y 7 -23.633 -39.684 29.522 1.00 30.00 O
ATOM 419 C2' DT Y 7 -22.853 -41.068 27.693 1.00 30.00 C
ATOM 420 C1' DT Y 7 -23.442 -42.372 27.162 1.00 30.00 C
ATOM 421 N1 DT Y 7 -23.865 -42.329 25.732 1.00 30.00 N
ATOM 422 C2 DT Y 7 -22.921 -42.505 24.756 1.00 30.00 C
ATOM 423 O2 DT Y 7 -21.743 -42.668 24.999 1.00 30.00 O
ATOM 424 N3 DT Y 7 -23.405 -42.478 23.475 1.00 30.00 N
ATOM 425 C4 DT Y 7 -24.718 -42.292 23.083 1.00 30.00 C
ATOM 426 O4 DT Y 7 -25.055 -42.279 21.902 1.00 30.00 O
ATOM 427 C5 DT Y 7 -25.663 -42.120 24.161 1.00 30.00 C
ATOM 428 C7 DT Y 7 -27.114 -41.906 23.864 1.00 30.00 C
ATOM 429 C6 DT Y 7 -25.197 -42.150 25.419 1.00 30.00 C
ATOM 430 P DG Y 8 -23.593 -38.086 29.451 1.00 30.00 P
ATOM 431 OP1 DG Y 8 -23.347 -37.579 30.820 1.00 30.00 O
ATOM 432 OP2 DG Y 8 -24.795 -37.660 28.700 1.00 30.00 O
ATOM 433 O5' DG Y 8 -22.303 -37.789 28.564 1.00 30.00 O
ATOM 434 C5' DG Y 8 -21.034 -38.257 28.992 1.00 30.00 C
ATOM 435 C4' DG Y 8 -20.019 -38.120 27.880 1.00 30.00 C
ATOM 436 O4' DG Y 8 -20.495 -38.844 26.718 1.00 30.00 O
ATOM 437 C3' DG Y 8 -19.786 -36.688 27.400 1.00 30.00 C
ATOM 438 O3' DG Y 8 -18.437 -36.539 26.951 1.00 30.00 O
ATOM 439 C2' DG Y 8 -20.776 -36.572 26.248 1.00 30.00 C
ATOM 440 C1' DG Y 8 -20.639 -37.952 25.637 1.00 30.00 C
ATOM 441 N9 DG Y 8 -21.776 -38.362 24.834 1.00 30.00 N
ATOM 442 C8 DG Y 8 -23.045 -38.675 25.260 1.00 30.00 C
ATOM 443 N7 DG Y 8 -23.848 -39.014 24.288 1.00 30.00 N
ATOM 444 C5 DG Y 8 -23.055 -38.917 23.149 1.00 30.00 C
ATOM 445 C6 DG Y 8 -23.370 -39.157 21.783 1.00 30.00 C
ATOM 446 O6 DG Y 8 -24.443 -39.536 21.288 1.00 30.00 O
ATOM 447 N1 DG Y 8 -22.260 -38.946 20.959 1.00 30.00 N
ATOM 448 C2 DG Y 8 -21.029 -38.544 21.412 1.00 30.00 C
ATOM 449 N2 DG Y 8 -20.096 -38.371 20.489 1.00 30.00 N
ATOM 450 N3 DG Y 8 -20.731 -38.313 22.678 1.00 30.00 N
ATOM 451 C4 DG Y 8 -21.786 -38.518 23.484 1.00 30.00 C
ATOM 452 P DA Y 9 -17.749 -35.085 26.899 1.00 30.00 P
ATOM 453 OP1 DA Y 9 -16.592 -35.057 27.832 1.00 30.00 O
ATOM 454 OP2 DA Y 9 -18.828 -34.091 27.076 1.00 30.00 O
ATOM 455 O5' DA Y 9 -17.219 -34.975 25.392 1.00 30.00 O
ATOM 456 C5' DA Y 9 -15.982 -35.582 25.028 1.00 30.00 C
ATOM 457 C4' DA Y 9 -15.841 -35.668 23.514 1.00 30.00 C
ATOM 458 O4' DA Y 9 -17.057 -36.237 22.944 1.00 30.00 O
ATOM 459 C3' DA Y 9 -15.641 -34.331 22.802 1.00 30.00 C
ATOM 460 O3' DA Y 9 -14.816 -34.505 21.655 1.00 30.00 O
ATOM 461 C2' DA Y 9 -17.062 -33.961 22.404 1.00 30.00 C
ATOM 462 C1' DA Y 9 -17.629 -35.320 22.036 1.00 30.00 C
ATOM 463 N9 DA Y 9 -19.075 -35.381 22.157 1.00 30.00 N
ATOM 464 C8 DA Y 9 -19.818 -35.263 23.296 1.00 30.00 C
ATOM 465 N7 DA Y 9 -21.108 -35.344 23.097 1.00 30.00 N
ATOM 466 C5 DA Y 9 -21.215 -35.512 21.731 1.00 30.00 C
ATOM 467 C6 DA Y 9 -22.319 -35.677 20.872 1.00 30.00 C
ATOM 468 N6 DA Y 9 -23.595 -35.676 21.272 1.00 30.00 N
ATOM 469 N1 DA Y 9 -22.065 -35.851 19.571 1.00 30.00 N
ATOM 470 C2 DA Y 9 -20.801 -35.824 19.140 1.00 30.00 C
ATOM 471 N3 DA Y 9 -19.684 -35.690 19.843 1.00 30.00 N
ATOM 472 C4 DA Y 9 -19.964 -35.538 21.143 1.00 30.00 C
ATOM 473 P DT Y 10 -14.338 -33.230 20.800 1.00 30.00 P
ATOM 474 OP1 DT Y 10 -12.873 -33.327 20.634 1.00 30.00 O
ATOM 475 OP2 DT Y 10 -14.911 -32.004 21.391 1.00 30.00 O
ATOM 476 O5' DT Y 10 -15.033 -33.445 19.381 1.00 30.00 O
ATOM 477 C5' DT Y 10 -14.762 -34.621 18.637 1.00 30.00 C
ATOM 478 C4' DT Y 10 -15.447 -34.580 17.286 1.00 30.00 C
ATOM 479 O4' DT Y 10 -16.882 -34.553 17.463 1.00 30.00 O
ATOM 480 C3' DT Y 10 -15.130 -33.356 16.424 1.00 30.00 C
ATOM 481 O3' DT Y 10 -15.199 -33.728 15.073 1.00 30.00 O
ATOM 482 C2' DT Y 10 -16.275 -32.433 16.762 1.00 30.00 C
ATOM 483 C1' DT Y 10 -17.386 -33.448 16.751 1.00 30.00 C
ATOM 484 N1 DT Y 10 -18.570 -32.995 17.402 1.00 30.00 N
ATOM 485 C2 DT Y 10 -19.668 -32.755 16.642 1.00 30.00 C
ATOM 486 O2 DT Y 10 -19.692 -32.918 15.441 1.00 30.00 O
ATOM 487 N3 DT Y 10 -20.751 -32.323 17.331 1.00 30.00 N
ATOM 488 C4 DT Y 10 -20.862 -32.125 18.693 1.00 30.00 C
ATOM 489 O4 DT Y 10 -21.926 -31.750 19.231 1.00 30.00 O
ATOM 490 C5 DT Y 10 -19.658 -32.395 19.444 1.00 30.00 C
ATOM 491 C7 DT Y 10 -19.647 -32.203 20.927 1.00 30.00 C
ATOM 492 C6 DT Y 10 -18.574 -32.813 18.768 1.00 30.00 C
ATOM 493 P DG Y 11 -13.989 -33.377 14.087 1.00 30.00 P
ATOM 494 OP1 DG Y 11 -13.929 -34.472 13.091 1.00 30.00 O
ATOM 495 OP2 DG Y 11 -12.791 -33.084 14.910 1.00 30.00 O
ATOM 496 O5' DG Y 11 -14.453 -32.009 13.410 1.00 30.00 O
ATOM 497 C5' DG Y 11 -14.432 -31.883 12.015 1.00 30.00 C
ATOM 498 C4' DG Y 11 -15.605 -32.605 11.398 1.00 30.00 C
ATOM 499 O4' DG Y 11 -16.668 -32.706 12.384 1.00 30.00 O
ATOM 500 C3' DG Y 11 -16.218 -31.855 10.247 1.00 30.00 C
ATOM 501 O3' DG Y 11 -16.987 -32.728 9.438 1.00 30.00 O
ATOM 502 C2' DG Y 11 -17.090 -30.866 10.995 1.00 30.00 C
ATOM 503 C1' DG Y 11 -17.678 -31.776 12.061 1.00 30.00 C
ATOM 504 N9 DG Y 11 -18.090 -31.056 13.252 1.00 30.00 N
ATOM 505 C8 DG Y 11 -17.300 -30.361 14.136 1.00 30.00 C
ATOM 506 N7 DG Y 11 -17.977 -29.789 15.095 1.00 30.00 N
ATOM 507 C5 DG Y 11 -19.294 -30.112 14.811 1.00 30.00 C
ATOM 508 C6 DG Y 11 -20.484 -29.780 15.497 1.00 30.00 C
ATOM 509 O6 DG Y 11 -20.612 -29.103 16.532 1.00 30.00 O
ATOM 510 N1 DG Y 11 -21.605 -30.322 14.862 1.00 30.00 N
ATOM 511 C2 DG Y 11 -21.561 -31.100 13.716 1.00 30.00 C
ATOM 512 N2 DG Y 11 -22.723 -31.534 13.233 1.00 30.00 N
ATOM 513 N3 DG Y 11 -20.455 -31.413 13.078 1.00 30.00 N
ATOM 514 C4 DG Y 11 -19.369 -30.883 13.671 1.00 30.00 C
ATOM 515 P DA Y 12 -17.511 -32.196 8.020 1.00 30.00 P
ATOM 516 OP1 DA Y 12 -17.983 -33.314 7.174 1.00 30.00 O
ATOM 517 OP2 DA Y 12 -16.398 -31.364 7.524 1.00 30.00 O
ATOM 518 O5' DA Y 12 -18.740 -31.244 8.412 1.00 30.00 O
ATOM 519 C5' DA Y 12 -19.813 -31.754 9.195 1.00 30.00 C
ATOM 520 C4' DA Y 12 -21.140 -31.490 8.528 1.00 30.00 C
ATOM 521 O4' DA Y 12 -22.109 -31.130 9.534 1.00 30.00 O
ATOM 522 C3' DA Y 12 -21.119 -30.353 7.528 1.00 30.00 C
ATOM 523 O3' DA Y 12 -21.912 -30.664 6.382 1.00 30.00 O
ATOM 524 C2' DA Y 12 -21.650 -29.145 8.302 1.00 30.00 C
ATOM 525 C1' DA Y 12 -22.386 -29.743 9.496 1.00 30.00 C
ATOM 526 N9 DA Y 12 -22.000 -29.159 10.783 1.00 30.00 N
ATOM 527 C8 DA Y 12 -20.759 -29.160 11.348 1.00 30.00 C
ATOM 528 N7 DA Y 12 -20.711 -28.574 12.522 1.00 30.00 N
ATOM 529 C5 DA Y 12 -22.008 -28.160 12.743 1.00 30.00 C
ATOM 530 C6 DA Y 12 -22.612 -27.479 13.814 1.00 30.00 C
ATOM 531 N6 DA Y 12 -21.948 -27.095 14.902 1.00 30.00 N
ATOM 532 N1 DA Y 12 -23.917 -27.181 13.707 1.00 30.00 N
ATOM 533 C2 DA Y 12 -24.585 -27.599 12.627 1.00 30.00 C
ATOM 534 N3 DA Y 12 -24.130 -28.259 11.562 1.00 30.00 N
ATOM 535 C4 DA Y 12 -22.821 -28.513 11.685 1.00 30.00 C
ATOM 536 P DA Y 13 -17.968 -25.215 7.927 1.00 30.00 P
ATOM 537 OP1 DA Y 13 -17.098 -24.747 6.824 1.00 30.00 O
ATOM 538 OP2 DA Y 13 -17.598 -24.992 9.344 1.00 30.00 O
ATOM 539 O5' DA Y 13 -19.406 -24.555 7.708 1.00 30.00 O
ATOM 540 C5' DA Y 13 -20.391 -25.252 6.993 1.00 30.00 C
ATOM 541 C4' DA Y 13 -21.436 -25.796 7.937 1.00 30.00 C
ATOM 542 O4' DA Y 13 -20.808 -26.201 9.177 1.00 30.00 O
ATOM 543 C3' DA Y 13 -22.534 -24.806 8.321 1.00 30.00 C
ATOM 544 O3' DA Y 13 -23.779 -25.402 8.143 1.00 30.00 O
ATOM 545 C2' DA Y 13 -22.274 -24.524 9.789 1.00 30.00 C
ATOM 546 C1' DA Y 13 -21.639 -25.818 10.236 1.00 30.00 C
ATOM 547 N9 DA Y 13 -20.833 -25.649 11.423 1.00 30.00 N
ATOM 548 C8 DA Y 13 -19.525 -25.997 11.594 1.00 30.00 C
ATOM 549 N7 DA Y 13 -19.058 -25.701 12.781 1.00 30.00 N
ATOM 550 C5 DA Y 13 -20.131 -25.106 13.419 1.00 30.00 C
ATOM 551 C6 DA Y 13 -20.285 -24.565 14.703 1.00 30.00 C
ATOM 552 N6 DA Y 13 -19.307 -24.541 15.608 1.00 30.00 N
ATOM 553 N1 DA Y 13 -21.483 -24.044 15.021 1.00 30.00 N
ATOM 554 C2 DA Y 13 -22.464 -24.082 14.119 1.00 30.00 C
ATOM 555 N3 DA Y 13 -22.440 -24.566 12.885 1.00 30.00 N
ATOM 556 C4 DA Y 13 -21.226 -25.055 12.590 1.00 30.00 C
ATOM 557 P DC Y 14 -24.681 -24.939 6.908 1.00 30.00 P
ATOM 558 OP1 DC Y 14 -25.718 -25.968 6.676 1.00 30.00 O
ATOM 559 OP2 DC Y 14 -23.731 -24.578 5.831 1.00 30.00 O
ATOM 560 O5' DC Y 14 -25.377 -23.607 7.441 1.00 30.00 O
ATOM 561 C5' DC Y 14 -26.129 -23.645 8.644 1.00 30.00 C
ATOM 562 C4' DC Y 14 -26.006 -22.339 9.396 1.00 30.00 C
ATOM 563 O4' DC Y 14 -24.818 -22.361 10.236 1.00 30.00 O
ATOM 564 C3' DC Y 14 -25.858 -21.092 8.516 1.00 30.00 C
ATOM 565 O3' DC Y 14 -26.502 -20.029 9.149 1.00 30.00 O
ATOM 566 C2' DC Y 14 -24.356 -20.872 8.534 1.00 30.00 C
ATOM 567 C1' DC Y 14 -24.106 -21.174 9.993 1.00 30.00 C
ATOM 568 N1 DC Y 14 -22.689 -21.345 10.351 1.00 30.00 N
ATOM 569 C2 DC Y 14 -22.340 -21.594 11.682 1.00 30.00 C
ATOM 570 O2 DC Y 14 -23.234 -21.701 12.534 1.00 30.00 O
ATOM 571 N3 DC Y 14 -21.031 -21.718 11.999 1.00 30.00 N
ATOM 572 C4 DC Y 14 -20.098 -21.612 11.044 1.00 30.00 C
ATOM 573 N4 DC Y 14 -18.814 -21.759 11.397 1.00 30.00 N
ATOM 574 C5 DC Y 14 -20.440 -21.344 9.687 1.00 30.00 C
ATOM 575 C6 DC Y 14 -21.733 -21.210 9.391 1.00 30.00 C
ATOM 576 P DG Y 15 -27.092 -18.797 8.312 1.00 30.00 P
ATOM 577 OP1 DG Y 15 -27.900 -19.356 7.205 1.00 30.00 O
ATOM 578 OP2 DG Y 15 -25.983 -17.865 8.004 1.00 30.00 O
ATOM 579 O5' DG Y 15 -28.067 -18.118 9.377 1.00 30.00 O
ATOM 580 C5' DG Y 15 -28.904 -18.957 10.175 1.00 30.00 C
ATOM 581 C4' DG Y 15 -28.919 -18.511 11.627 1.00 30.00 C
ATOM 582 O4' DG Y 15 -28.019 -19.336 12.424 1.00 30.00 O
ATOM 583 C3' DG Y 15 -28.448 -17.092 11.874 1.00 30.00 C
ATOM 584 O3' DG Y 15 -29.503 -16.170 11.637 1.00 30.00 O
ATOM 585 C2' DG Y 15 -28.080 -17.181 13.347 1.00 30.00 C
ATOM 586 C1' DG Y 15 -27.332 -18.508 13.358 1.00 30.00 C
ATOM 587 N9 DG Y 15 -25.949 -18.386 12.911 1.00 30.00 N
ATOM 588 C8 DG Y 15 -25.465 -18.739 11.681 1.00 30.00 C
ATOM 589 N7 DG Y 15 -24.195 -18.519 11.537 1.00 30.00 N
ATOM 590 C5 DG Y 15 -23.800 -17.985 12.747 1.00 30.00 C
ATOM 591 C6 DG Y 15 -22.524 -17.561 13.162 1.00 30.00 C
ATOM 592 O6 DG Y 15 -21.479 -17.579 12.512 1.00 30.00 O
ATOM 593 N1 DG Y 15 -22.534 -17.069 14.464 1.00 30.00 N
ATOM 594 C2 DG Y 15 -23.655 -17.005 15.264 1.00 30.00 C
ATOM 595 N2 DG Y 15 -23.476 -16.506 16.495 1.00 30.00 N
ATOM 596 N3 DG Y 15 -24.869 -17.409 14.884 1.00 30.00 N
ATOM 597 C4 DG Y 15 -24.864 -17.885 13.612 1.00 30.00 C
ATOM 598 P DA Y 16 -29.206 -14.590 11.569 1.00 30.00 P
ATOM 599 OP1 DA Y 16 -30.416 -13.966 10.989 1.00 30.00 O
ATOM 600 OP2 DA Y 16 -27.889 -14.375 10.923 1.00 30.00 O
ATOM 601 O5' DA Y 16 -29.098 -14.167 13.109 1.00 30.00 O
ATOM 602 C5' DA Y 16 -28.530 -12.922 13.492 1.00 30.00 C
ATOM 603 C4' DA Y 16 -28.268 -12.919 14.988 1.00 30.00 C
ATOM 604 O4' DA Y 16 -27.616 -14.148 15.368 1.00 30.00 O
ATOM 605 C3' DA Y 16 -27.391 -11.781 15.496 1.00 30.00 C
ATOM 606 O3' DA Y 16 -28.111 -11.042 16.457 1.00 30.00 O
ATOM 607 C2' DA Y 16 -26.170 -12.462 16.129 1.00 30.00 C
ATOM 608 C1' DA Y 16 -26.269 -13.902 15.665 1.00 30.00 C
ATOM 609 N9 DA Y 16 -25.473 -14.224 14.488 1.00 30.00 N
ATOM 610 C8 DA Y 16 -25.916 -14.815 13.343 1.00 30.00 C
ATOM 611 N7 DA Y 16 -24.987 -15.017 12.451 1.00 30.00 N
ATOM 612 C5 DA Y 16 -23.847 -14.532 13.053 1.00 30.00 C
ATOM 613 C6 DA Y 16 -22.521 -14.458 12.615 1.00 30.00 C
ATOM 614 N6 DA Y 16 -22.125 -14.911 11.421 1.00 30.00 N
ATOM 615 N1 DA Y 16 -21.614 -13.918 13.457 1.00 30.00 N
ATOM 616 C2 DA Y 16 -22.027 -13.484 14.658 1.00 30.00 C
ATOM 617 N3 DA Y 16 -23.255 -13.493 15.175 1.00 30.00 N
ATOM 618 C4 DA Y 16 -24.125 -14.040 14.314 1.00 30.00 C
ATOM 619 P DC Y 17 -27.805 -9.482 16.652 1.00 30.00 P
ATOM 620 OP1 DC Y 17 -28.655 -8.987 17.757 1.00 30.00 O
ATOM 621 OP2 DC Y 17 -27.880 -8.841 15.320 1.00 30.00 O
ATOM 622 O5' DC Y 17 -26.280 -9.456 17.113 1.00 30.00 O
ATOM 623 C5' DC Y 17 -25.925 -9.741 18.462 1.00 30.00 C
ATOM 624 C4' DC Y 17 -24.433 -9.547 18.671 1.00 30.00 C
ATOM 625 O4' DC Y 17 -23.708 -10.309 17.671 1.00 30.00 O
ATOM 626 C3' DC Y 17 -23.947 -8.105 18.503 1.00 30.00 C
ATOM 627 O3' DC Y 17 -22.738 -7.899 19.238 1.00 30.00 O
ATOM 628 C2' DC Y 17 -23.664 -8.066 17.014 1.00 30.00 C
ATOM 629 C1' DC Y 17 -22.985 -9.413 16.862 1.00 30.00 C
ATOM 630 N1 DC Y 17 -22.948 -9.923 15.494 1.00 30.00 N
ATOM 631 C2 DC Y 17 -21.719 -10.017 14.895 1.00 30.00 C
ATOM 632 O2 DC Y 17 -20.734 -9.655 15.558 1.00 30.00 O
ATOM 633 N3 DC Y 17 -21.635 -10.491 13.620 1.00 30.00 N
ATOM 634 C4 DC Y 17 -22.754 -10.857 12.975 1.00 30.00 C
ATOM 635 N4 DC Y 17 -22.630 -11.326 11.725 1.00 30.00 N
ATOM 636 C5 DC Y 17 -24.042 -10.767 13.591 1.00 30.00 C
ATOM 637 C6 DC Y 17 -24.091 -10.293 14.842 1.00 30.00 C
TER
END
"""
def run(prefix, target, pdb_strs, eps=1.e-3):
  """
  Exercise phenix.angle command.

  For each PDB string, write it to <prefix>.pdb, run phenix.angle on
  chains X and Y in both argument orders, and check that the reported
  angle matches *target* to within *eps* degrees.

  Parameters:
    prefix   : basename for the temporary PDB file.
    target   : expected angle, in degrees.
    pdb_strs : list of PDB file contents (strings) to test.
    eps      : absolute tolerance for the angle comparison.
  """
  # Count every phenix.angle invocation so we can verify none were skipped.
  cntr = 0
  for pdb_str in pdb_strs:
    of = open("%s.pdb"%prefix, "w")
    print >> of, pdb_str  # Python 2 print-to-file
    of.close()
    # The angle between two chains must not depend on argument order.
    cmd1='phenix.angle %s.pdb "chain X" "chain Y" '%(prefix)
    cmd2='phenix.angle %s.pdb "chain Y" "chain X" '%(prefix)
    for cmd in [cmd1,cmd2]:
      print cmd
      # The angle value is the 3rd whitespace-separated token on the 3rd
      # line of phenix.angle's stdout.
      r = float(easy_run.go(cmd).stdout_lines[2].split()[2])
      assert approx_equal(r, target, eps)
      cntr += 1
  # Two runs (both chain orders) per input PDB string.
  assert cntr == len(pdb_strs)*2, cntr
if (__name__ == "__main__"):
  t0 = time.time()
  # Each exercise checks a different expected inter-chain angle; the PDB
  # strings (pdb_str_1 .. pdb_str_10) are defined earlier in this file.
  run(prefix="tst_angle_exercise_1", target=90., pdb_strs=[pdb_str_1, pdb_str_2, pdb_str_3])
  run(prefix="tst_angle_exercise_2", target=0., pdb_strs=[pdb_str_4, pdb_str_5, pdb_str_6, pdb_str_7])
  run(prefix="tst_angle_exercise_3", target=45., pdb_strs=[pdb_str_8, pdb_str_9])
  # Looser tolerance: the 8.98-degree case is a measured, not constructed, angle.
  run(prefix="tst_angle_exercise_4", target=8.98, pdb_strs=[pdb_str_10], eps=0.1)
  print "Time: %6.4f"%(time.time()-t0)
  print "OK"
| 72.845588
| 103
| 0.453131
| 12,752
| 59,442
| 2.106023
| 0.08877
| 0.081211
| 0.118595
| 0.166034
| 0.456248
| 0.425007
| 0.192881
| 0.128091
| 0.124441
| 0.123362
| 0
| 0.59417
| 0.457538
| 59,442
| 815
| 104
| 72.934969
| 0.238704
| 0.000555
| 0
| 0.095597
| 0
| 0.927044
| 0.981756
| 0
| 0
| 0
| 0
| 0
| 0.002516
| 0
| null | null | 0
| 0.005031
| null | null | 0.005031
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
917c71e603b4b6bca0569eed2b41ea4fc703d376
| 37
|
py
|
Python
|
protocols/protocol_1_2_0-opencb/opencb.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | null | null | null |
protocols/protocol_1_2_0-opencb/opencb.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | null | null | null |
protocols/protocol_1_2_0-opencb/opencb.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | null | null | null |
from protocols.opencb_1_2_0 import *
| 18.5
| 36
| 0.837838
| 7
| 37
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.108108
| 37
| 1
| 37
| 37
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91b965470f79912f4f1272ab46874449dcd1e677
| 3,791
|
py
|
Python
|
flask_assistant/utils.py
|
voxally/flask-assistant
|
cd203d351108a911bce3010fe4e14846ef0cb02f
|
[
"Apache-2.0"
] | null | null | null |
flask_assistant/utils.py
|
voxally/flask-assistant
|
cd203d351108a911bce3010fe4e14846ef0cb02f
|
[
"Apache-2.0"
] | null | null | null |
flask_assistant/utils.py
|
voxally/flask-assistant
|
cd203d351108a911bce3010fe4e14846ef0cb02f
|
[
"Apache-2.0"
] | 1
|
2019-11-08T20:42:09.000Z
|
2019-11-08T20:42:09.000Z
|
from __future__ import absolute_import
from typing import Dict, Any
import requests
import os
import sys
import logging
from google.auth import jwt
from flask_assistant.core import Assistant
from . import logger
logger.setLevel(logging.INFO)
GOOGLE_PUBLIC_KEY = {
"8a63fe71e53067524cbbc6a3a58463b3864c0787": "-----BEGIN CERTIFICATE-----\nMIIDJjCCAg6gAwIBAgIIOgLatvPIOogwDQYJKoZIhvcNAQEFBQAwNjE0MDIGA1UE\nAxMrZmVkZXJhdGVkLXNpZ25vbi5zeXN0ZW0uZ3NlcnZpY2VhY2NvdW50LmNvbTAe\nFw0xOTEwMTkxNDQ5MzRaFw0xOTExMDUwMzA0MzRaMDYxNDAyBgNVBAMTK2ZlZGVy\nYXRlZC1zaWdub24uc3lzdGVtLmdzZXJ2aWNlYWNjb3VudC5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJs8t86KhuRio8l/ZL3xOQ/Rh2/PJKo9IA\n1rVHDxAS1PYjGrZ+7UMU6f3yt7WqNifkdA+Vboe+f//D0maUZ36YqD9cm+8RrzVO\nb60/21OuB+UCLONtMd9f36f00WUpr/VuUngcpifW+SGbCEI+a7Jd5vuwdkEie++O\nXiVpUHdVXCKCqCt5kEXrlqh0xxWQirMH7pXL9Yp5GoBH6w1wl/yx1I285LA89D+D\nDcxDfxKJ6bFVWR+efoBaJyG4Qj3tLbrgi7OA0kXlOKmlM+POMm/6rGKxEOoya9p6\nSLU8J7wJj0QpAnEFZk7LpaVt3LWCbM54uxcRUvkiwXIsZfpOrXaZAgMBAAGjODA2\nMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsG\nAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4IBAQAvsqHib9Zv36Z1u09/B1eUkrZovl9F\n8ZzzxkqNgrff9zBrtwstCPRPsz8LMaGWlDHcIqsLVe2nMbZp9ZGGCtZHoCKhiFnj\nOcNaixgGT8+wP6vn5SaNhZgu2AUNb9u6zc0IA59ggGeahSIkA17DqLqb7mIeLEj2\nTo7HTbEqjWZhl7zq01T/R7PQ5w/++InUL7HrXmwYczgJWCh6h5mU5jpYnuXRr+YI\nXEfZOaELe0HHxOfgtkY7P/f2Wb/ls0fbvYwqklxYN+jXjiopZevCoobWDlrGKZ1Z\nt114KpEaJ9RgL23tfePs32VV1NwVVnEtaWD2lijIO3AyIn+I7JHL7MDD\n-----END CERTIFICATE-----\n",
"3db3ed6b9574ee3fcd9f149e59ff0eef4f932153": "-----BEGIN CERTIFICATE-----\nMIIDJjCCAg6gAwIBAgIIeBPD3wqfL6EwDQYJKoZIhvcNAQEFBQAwNjE0MDIGA1UE\nAxMrZmVkZXJhdGVkLXNpZ25vbi5zeXN0ZW0uZ3NlcnZpY2VhY2NvdW50LmNvbTAe\nFw0xOTEwMTExNDQ5MzRaFw0xOTEwMjgwMzA0MzRaMDYxNDAyBgNVBAMTK2ZlZGVy\nYXRlZC1zaWdub24uc3lzdGVtLmdzZXJ2aWNlYWNjb3VudC5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYpym/gLFOh4IoQhfOeGo+DbUyEIA/0Odf\nmzb9R1nVvM5WFHyqKiT8/yPvLxgXYzYlzyvZu18KAkYWWNuS21Vzhe+d4949P6EZ\n/096QjVFSHvKTo94bSQImeZxZiBhfFcvw/RMM0eTeZZPgOXI3YIJyWjAZ9FUslt7\nWoLU0HZFc/JyPRF8M2kinkdYxnzA+MjzCetXlqmhAr+wLPg/QLKwACyRIF2FJHgf\nPsvqaeF7JXo0zHPcGuHUOqXCHon6KiHZF7OC4bzTuTEzVipJTLYy9QUyL4M2L8bQ\nu1ISUSaXhj+i1WT0RDJwqpioOFprVFqqkVvbUW0nXD/x1UA4nvf7AgMBAAGjODA2\nMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsG\nAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4IBAQBr5+4ZvfhP436NdJgN0Jn7iwwVArus\nXUn0hfuBbCoj1DhuRkP9wyLCpOo6cQS0T5bURVZzirsKc5sXP4fNYXqbfLaBpc7n\njTUtTOIqoA4LKPU7/FH6Qt/UfZ4DQIsKaD3087KdY3ePatSn/HTxvT8Ghqy/JGjf\nLXZehQnlyyCRaCMqv1gEOMiY/8LG3d1hLL7CMphnb4ASk0YMKrWkKhIoa6NWU2Rd\nqp01F4iG44ABpea+ymXAGmWBVPnep51kr/wIPIzr9WvNFAAZW2Enk3+kUWNupuz+\npdXq9KnegVsCs4G7QcTPqwc/vMu7uGq/pruDEOYVOd9Rm+rr0wlMgkcf\n-----END CERTIFICATE-----\n",
}
def import_with_3(module_name, path):
    """Load the Python source file at *path* as a module named *module_name*.

    Uses the Python 3 ``importlib`` machinery; returns the executed module.
    """
    import importlib.util

    spec = importlib.util.spec_from_file_location(module_name, path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
def import_with_2(module_name, path):
    """Load the source file at *path* as *module_name* via the legacy ``imp`` API.

    Fallback for interpreters without ``importlib.util``.
    NOTE(review): the ``imp`` module was removed in Python 3.12; this path
    only works on Python 2 / early Python 3 — confirm the supported range.
    """
    import imp
    return imp.load_source(module_name, path)
def get_assistant(filename):
    """Imports a module from filename as a string, returns the contained Assistant object.

    The module is loaded from the current working directory with the
    Python 3 importlib loader, falling back to the legacy ``imp`` loader
    on ImportError.  Returns the first ``Assistant`` instance found among
    the module's attributes, or None if the module defines none.
    """
    agent_name = os.path.splitext(filename)[0]
    module_path = os.path.join(os.getcwd(), filename)
    try:
        agent_module = import_with_3(agent_name, module_path)
    except ImportError:
        agent_module = import_with_2(agent_name, module_path)
    # Only attribute values matter here; the attribute names are irrelevant.
    for obj in vars(agent_module).values():
        if isinstance(obj, Assistant):
            return obj
    return None
def decode_token(token, client_id, timeout=10):
    """Verify a Google-signed JWT and return its decoded claims.

    Fetches Google's current public certificates over HTTPS and verifies
    *token* against them, requiring *client_id* as the token audience.

    Parameters:
        token: the signed JWT string to verify.
        client_id: expected ``aud`` claim of the token.
        timeout: seconds to wait for the certificate endpoint (new,
            defaulted parameter — existing callers are unaffected).

    Raises ``requests.HTTPError`` on a non-2xx certificate response and
    google.auth verification errors on an invalid token.
    """
    # A timeout is mandatory: requests.get with no timeout can block forever.
    resp = requests.get(
        'https://www.googleapis.com/oauth2/v1/certs', timeout=timeout
    )
    # Fail loudly instead of feeding an error body to jwt.decode.
    resp.raise_for_status()
    public_keys = resp.json()
    decoded = jwt.decode(token, certs=public_keys, verify=True, audience=client_id)
    return decoded
| 67.696429
| 1,221
| 0.86125
| 297
| 3,791
| 10.848485
| 0.585859
| 0.012415
| 0.017381
| 0.013966
| 0.021726
| 0.021726
| 0.021726
| 0.021726
| 0
| 0
| 0
| 0.095682
| 0.065418
| 3,791
| 55
| 1,222
| 68.927273
| 0.813717
| 0.02163
| 0
| 0
| 0
| 0.054054
| 0.664866
| 0.637321
| 0
| 1
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.486486
| 0
| 0.702703
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
91ba770106aec76e30afa5083b47f2e5a048161f
| 3,130
|
py
|
Python
|
saleor/graphql/checkout/tests/deprecated/test_checkout_language_code_update.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/deprecated/test_checkout_language_code_update.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/deprecated/test_checkout_language_code_update.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
from .....checkout.error_codes import CheckoutErrorCode
from ....tests.utils import get_graphql_content
MUTATION_CHECKOUT_UPDATE_LANGUAGE_CODE = """
mutation checkoutLanguageCodeUpdate(
$checkoutId: ID, $token: UUID, $languageCode: LanguageCodeEnum!
){
checkoutLanguageCodeUpdate(
checkoutId: $checkoutId, token: $token, languageCode: $languageCode
){
checkout{
id
languageCode
}
errors{
field
message
code
}
}
}
"""
def test_checkout_update_language_code_by_id(
    user_api_client,
    checkout_with_gift_card,
):
    """Updating the language code via a global checkout ID succeeds."""
    checkout = checkout_with_gift_card
    node_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    new_code = "PL"

    response = user_api_client.post_graphql(
        MUTATION_CHECKOUT_UPDATE_LANGUAGE_CODE,
        {"checkoutId": node_id, "languageCode": new_code},
    )

    payload = get_graphql_content(response)["data"]["checkoutLanguageCodeUpdate"]
    assert not payload["errors"]
    assert payload["checkout"]["languageCode"] == new_code
    checkout.refresh_from_db()
    assert checkout.language_code == new_code.lower()
def test_checkout_update_language_code_by_token(
    user_api_client,
    checkout_with_gift_card,
):
    """Updating the language code via the checkout token succeeds."""
    checkout = checkout_with_gift_card
    new_code = "PL"

    response = user_api_client.post_graphql(
        MUTATION_CHECKOUT_UPDATE_LANGUAGE_CODE,
        {"token": checkout.token, "languageCode": new_code},
    )

    payload = get_graphql_content(response)["data"]["checkoutLanguageCodeUpdate"]
    assert not payload["errors"]
    assert payload["checkout"]["languageCode"] == new_code
    checkout.refresh_from_db()
    assert checkout.language_code == new_code.lower()
def test_checkout_update_language_code_neither_token_and_id_given(
    user_api_client,
    checkout_with_gift_card,
):
    """Omitting both checkout ID and token yields a single GRAPHQL_ERROR."""
    response = user_api_client.post_graphql(
        MUTATION_CHECKOUT_UPDATE_LANGUAGE_CODE,
        {"languageCode": "PL"},
    )

    payload = get_graphql_content(response)["data"]["checkoutLanguageCodeUpdate"]
    assert len(payload["errors"]) == 1
    assert not payload["checkout"]
    assert payload["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
def test_checkout_update_language_code_both_token_and_id_given(
    user_api_client,
    checkout_with_gift_card,
):
    """Supplying both checkout ID and token yields a single GRAPHQL_ERROR."""
    checkout = checkout_with_gift_card
    variables = {
        "checkoutId": graphene.Node.to_global_id("Checkout", checkout.pk),
        "token": checkout.token,
        "languageCode": "PL",
    }

    response = user_api_client.post_graphql(
        MUTATION_CHECKOUT_UPDATE_LANGUAGE_CODE, variables
    )

    payload = get_graphql_content(response)["data"]["checkoutLanguageCodeUpdate"]
    assert len(payload["errors"]) == 1
    assert not payload["checkout"]
    assert payload["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
| 28.198198
| 76
| 0.723003
| 345
| 3,130
| 6.182609
| 0.165217
| 0.129395
| 0.092827
| 0.109705
| 0.841069
| 0.825129
| 0.809658
| 0.792311
| 0.792311
| 0.792311
| 0
| 0.001555
| 0.178275
| 3,130
| 110
| 77
| 28.454545
| 0.827761
| 0
| 0
| 0.595506
| 0
| 0
| 0.207348
| 0.050479
| 0
| 0
| 0
| 0
| 0.134831
| 1
| 0.044944
| false
| 0
| 0.033708
| 0
| 0.078652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91c5f57ca4de22837ca8a350712f86d9e6643066
| 8,235
|
py
|
Python
|
src/tests/test_profiler.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-10-24T21:43:00.000Z
|
2018-10-24T21:43:00.000Z
|
src/tests/test_profiler.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-03-10T01:25:47.000Z
|
2018-03-10T03:33:36.000Z
|
src/tests/test_profiler.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 6
|
2018-01-13T17:54:31.000Z
|
2018-02-13T23:46:50.000Z
|
import pytest
from data_logger import DataLogger
from profiler import TrapezoidalProfile
def test_cruise_velocity_positive1():
    """A negative cruise velocity given to the constructor must be rejected."""
    with pytest.raises(AssertionError):
        TrapezoidalProfile(cruise_v=-10, a=20, target_pos=0, tolerance=.5)
def test_cruise_velocity_positive2():
    """Scaling the cruise velocity by a negative factor must be rejected."""
    p = TrapezoidalProfile(cruise_v=10, a=20, target_pos=0, tolerance=.5)
    with pytest.raises(AssertionError):
        p.setCruiseVelocityScale(-1)
def test_cruise_velocity_positive3():
    """A profile whose internal cruise velocity turned negative must fail fast."""
    p = TrapezoidalProfile(cruise_v=10, a=20, target_pos=0, tolerance=.5)
    # Deliberately poke the private attribute to simulate a corrupted profile.
    p._cruise_v = -10
    with pytest.raises(AssertionError):
        p.calculate_new_velocity(0, 0.01)
def test_target_position_may_be_negative():
    """Constructing a profile with a negative target position is valid."""
    TrapezoidalProfile(cruise_v=10, a=20, target_pos=-1000, tolerance=.5)
def test_target_velocity_may_be_negative():
    """Driving toward a negative target yields negative velocity and acceleration."""
    p = TrapezoidalProfile(cruise_v=10, a=20, target_pos=-1000, tolerance=.5)
    p.calculate_new_velocity(0, 0.01)
    assert p.current_target_v < 0
    assert p.current_a < 0
@pytest.mark.parametrize("target_pos, current_pos, expectedResult", [
    (0, 0, True),
    (0, 0.51, False),
    (0, 0.5, False),
    (0, 0.499, True),
    (0, -0.499, True),
    (0, -0.5, False),
    (0, -0.51, False),
])
def test_isFinished1(target_pos, current_pos, expectedResult):
    """
    At rest (velocity 0) the profile is finished exactly when the current
    position lies strictly within tolerance of the target position.
    """
    p = TrapezoidalProfile(
        cruise_v=10, a=20, target_pos=target_pos, tolerance=.5, current_target_v=0
    )
    assert p.isFinished(current_pos) == expectedResult
def test_isFinished2():
    """
    When moving, we are not done even when our current position is within
    tolerance of the target position, because overshoot has happened.
    """
    profiler = TrapezoidalProfile(cruise_v=10, a=20, target_pos=0, tolerance=.5, current_target_v=10)
    # `is False` instead of `== False` (PEP 8 / flake8 E712): isFinished is
    # expected to return an actual bool, and the test should say so exactly.
    assert profiler.isFinished(0) is False
# Set to True to dump each simulated trajectory to a CSV file for inspection.
log_trajectory = False
def test_profiler1():
    """
    forward velocity, trapezoid, no overshoot

    Simulates a 50-unit forward move and checks the accelerate / cruise /
    decelerate phases of the resulting trapezoidal velocity profile.
    """
    DT = 0.02  # simulation time step, seconds
    profiler = TrapezoidalProfile(cruise_v=10, a=20, target_pos=50, tolerance=.5, current_target_v=0)
    t = 0
    pos = 0
    if log_trajectory:
        logger = DataLogger("test_profiler1.csv")
        logger.log_while_disabled = True
        logger.do_print = True
        logger.add('t', lambda: t)
        logger.add('pos', lambda: pos)
        logger.add('v', lambda: profiler.current_target_v)
        logger.add('a', lambda: profiler.current_a)
    while not profiler.isFinished(pos):
        if log_trajectory:
            logger.log()
        profiler.calculate_new_velocity(pos, DT)
        pos += profiler.current_target_v * DT  # forward-Euler position update
        t += DT
        if t > 10:
            if log_trajectory:
                logger.close()
            assert False, "sim loop timed out"
        # Acceleration phase: ramp up at +20 until cruise speed is reached.
        if t < 0.501:
            assert profiler.current_a == pytest.approx(20, 0.01), "t = %f" % (t,)
        # Cruise phase: constant +10 velocity, zero acceleration.
        if 0.501 < t < 5:
            assert profiler.current_target_v == pytest.approx(10., 0.01), "t = %f" % (t,)
            assert profiler.current_a == 0, "t = %f" % (t,)
        # Deceleration phase: ramp down at -20.
        if 5 < t < 5.5 - 0.001:
            assert profiler.current_a == pytest.approx(-20., 0.01), "t = %f" % (t,)
        # End of move: velocity and acceleration both settle to zero.
        if t == pytest.approx(5.50, 0.001):
            assert profiler.current_a == pytest.approx(0., 0.01), "t = %f" % (t,)
            assert profiler.current_target_v == pytest.approx(0., 0.01), "t = %f" % (t,)
    if log_trajectory:
        logger.log()
        logger.close()
    assert t == pytest.approx(5.52, 0.01)
    assert profiler.current_a == 0
def test_profiler2():
    """
    forward velocity, triangle, no overshoot

    The 4-unit target is too short to reach cruise speed, so the velocity
    profile is a triangle: accelerate, then immediately decelerate.
    """
    DT = 0.02  # simulation time step, seconds
    profiler = TrapezoidalProfile(cruise_v=10, a=20, target_pos=4, tolerance=.5, current_target_v=0)
    t = 0
    pos = 0
    if log_trajectory:
        logger = DataLogger("test_profiler2.csv")
        logger.log_while_disabled = True
        logger.add('t', lambda: t)
        logger.add('pos', lambda: pos)
        logger.add('v', lambda: profiler.current_target_v)
        logger.add('a', lambda: profiler.current_a)
    while not profiler.isFinished(pos):
        if log_trajectory:
            logger.log()
        profiler.calculate_new_velocity(pos, DT)
        pos += profiler.current_target_v * DT  # forward-Euler position update
        t += DT
        if t > 10:
            if log_trajectory:
                logger.close()
            assert False, "sim loop timed out"
        # Rising edge of the triangle: constant +20 acceleration.
        if t < 0.4599:
            assert profiler.current_a == pytest.approx(20, 0.01), "t = %f" % (t,)
        # Apex of the triangle: peak velocity ~9.2, still accelerating.
        if t == pytest.approx(0.46, 0.01):
            assert profiler.current_target_v == pytest.approx(9.2, 0.01)
            assert profiler.current_a == pytest.approx(20, 0.01), "t = %f" % (t,)
        # Falling edge: constant -20 deceleration.
        if 0.4601 < t < 0.92:
            assert profiler.current_a == pytest.approx(-20, 0.01), "t = %f" % (t,)
    if log_trajectory:
        logger.log()
        logger.close()
    assert 0.8 < t < 1.0
    assert profiler.current_a == 0
def test_profiler3():
    """
    reverse velocity, trapezoid, no overshoot

    Mirror image of test_profiler1: a 50-unit move in the negative
    direction, so every velocity/acceleration sign is flipped.
    """
    DT = 0.02  # simulation time step, seconds
    profiler = TrapezoidalProfile(cruise_v=10, a=20, target_pos=-50, tolerance=.5, current_target_v=0)
    t = 0
    pos = 0
    if log_trajectory:
        logger = DataLogger("test_profiler3.csv")
        logger.log_while_disabled = True
        logger.add('t', lambda: t)
        logger.add('pos', lambda: pos)
        logger.add('v', lambda: profiler.current_target_v)
        logger.add('a', lambda: profiler.current_a)
    while not profiler.isFinished(pos):
        if log_trajectory:
            logger.log()
        profiler.calculate_new_velocity(pos, DT)
        pos += profiler.current_target_v * DT  # forward-Euler position update
        t += DT
        if t > 10:
            if log_trajectory:
                logger.close()
            assert False, "sim loop timed out"
        # Acceleration phase (negative direction): constant -20.
        if t < 0.501:
            assert profiler.current_a == pytest.approx(-20, 0.01), "t = %f" % (t,)
        # Cruise phase: constant -10 velocity, zero acceleration.
        if 0.501 < t < 5.0-0.001:
            assert profiler.current_target_v == pytest.approx(-10., 0.01), "t = %f" % (t,)
            assert profiler.current_a == 0, "t = %f" % (t,)
        # Deceleration phase: constant +20 back toward zero velocity.
        if 5.0 < t < 5.52-0.001:
            assert profiler.current_a == pytest.approx(20, 0.01), "t = %f" % (t,)
    if log_trajectory:
        logger.log()
        logger.close()
    assert 5 < t < 6
    assert profiler.current_a == 0
def test_profiler4():
    """
    reverse velocity, triangle, no overshoot

    Mirror image of test_profiler2: a short 4-unit move in the negative
    direction produces a triangular (never-cruising) velocity profile.
    """
    DT = 0.02  # simulation time step, seconds
    profiler = TrapezoidalProfile(cruise_v=10, a=20, target_pos=-4, tolerance=.5, current_target_v=0)
    t = 0
    pos = 0
    if log_trajectory:
        logger = DataLogger("test_profiler4.csv")
        logger.log_while_disabled = True
        logger.add('t', lambda: t)
        logger.add('pos', lambda: pos)
        logger.add('v', lambda: profiler.current_target_v)
        logger.add('a', lambda: profiler.current_a)
    while not profiler.isFinished(pos):
        if log_trajectory:
            logger.log()
        profiler.calculate_new_velocity(pos, DT)
        pos += profiler.current_target_v * DT  # forward-Euler position update
        t += DT
        if t > 10:
            if log_trajectory:
                logger.close()
            assert False, "sim loop timed out"
        # Rising (negative) edge of the triangle: constant -20 acceleration.
        if t < 0.4599:
            assert profiler.current_a == pytest.approx(-20, 0.01), "t = %f" % (t,)
        # Apex: peak velocity ~-9.2, still accelerating negatively.
        if t == pytest.approx(0.46, 0.01):
            assert profiler.current_target_v == pytest.approx(-9.2, 0.01)
            assert profiler.current_a == pytest.approx(-20, 0.01), "t = %f" % (t,)
        # Falling edge: constant +20 back toward zero velocity.
        if 0.4601 < t < 0.92:
            assert profiler.current_a == pytest.approx(20, 0.01), "t = %f" % (t,)
    if log_trajectory:
        logger.log()
        logger.close()
    assert t == pytest.approx(0.94, 0.01)
    assert profiler.current_a == 0
| 29.202128
| 114
| 0.60085
| 1,109
| 8,235
| 4.317403
| 0.107304
| 0.112782
| 0.105263
| 0.082707
| 0.861111
| 0.820175
| 0.792189
| 0.739348
| 0.722849
| 0.713033
| 0
| 0.057913
| 0.27031
| 8,235
| 281
| 115
| 29.30605
| 0.738892
| 0.077231
| 0
| 0.674157
| 0
| 0
| 0.040786
| 0
| 0
| 0
| 0
| 0
| 0.207865
| 1
| 0.061798
| false
| 0
| 0.016854
| 0
| 0.078652
| 0.005618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91e24c67f9a253d6c6e6b22bdba3cb5c2e3f9835
| 131
|
py
|
Python
|
Ch_4_Py Crust_Code Structure/4_8.py
|
brianchiang-tw/Introducing_Python
|
557fcddb6329741a177d6ee1d24122b36e106235
|
[
"MIT"
] | 1
|
2020-07-21T08:34:08.000Z
|
2020-07-21T08:34:08.000Z
|
Ch_4_Py Crust_Code Structure/4_8.py
|
brianchiang-tw/Introducing_Python
|
557fcddb6329741a177d6ee1d24122b36e106235
|
[
"MIT"
] | null | null | null |
Ch_4_Py Crust_Code Structure/4_8.py
|
brianchiang-tw/Introducing_Python
|
557fcddb6329741a177d6ee1d24122b36e106235
|
[
"MIT"
] | null | null | null |
def good():
    """Return a fresh list of the three main character names."""
    trio = ['Harry', 'Ron', 'Hermione']
    return trio
# Expected output when this script is run:
'''
['Harry', 'Ron', 'Hermione']
'''
# Demonstrate good() by printing its return value.
print( good() )
| 11.909091
| 39
| 0.519084
| 13
| 131
| 5.230769
| 0.692308
| 0.235294
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21374
| 131
| 11
| 40
| 11.909091
| 0.660194
| 0.122137
| 0
| 0
| 0
| 0
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
91e27a08167c3670f08ab963218a27e3bde0b72a
| 241
|
py
|
Python
|
janus/qmmm/__init__.py
|
zhenglz/janus
|
f3f1ed3f2b6e377c51e958cae2d919069d221eda
|
[
"BSD-3-Clause"
] | 16
|
2019-04-18T15:45:02.000Z
|
2021-12-17T17:51:18.000Z
|
janus/qmmm/__init__.py
|
zhenglz/janus
|
f3f1ed3f2b6e377c51e958cae2d919069d221eda
|
[
"BSD-3-Clause"
] | 2
|
2019-06-20T16:56:08.000Z
|
2020-08-28T16:09:16.000Z
|
janus/qmmm/__init__.py
|
zhenglz/janus
|
f3f1ed3f2b6e377c51e958cae2d919069d221eda
|
[
"BSD-3-Clause"
] | 8
|
2018-11-16T17:00:58.000Z
|
2022-01-11T05:36:50.000Z
|
from janus.qmmm.qmmm import QMMM
from janus.qmmm.aqmmm import AQMMM
from janus.qmmm.oniom_xs import OniomXS
from janus.qmmm.hot_spot import HotSpot
from janus.qmmm.pap import PAP
from janus.qmmm.sap import SAP
from janus.qmmm.das import DAS
| 30.125
| 39
| 0.825726
| 44
| 241
| 4.477273
| 0.318182
| 0.319797
| 0.461929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116183
| 241
| 7
| 40
| 34.428571
| 0.924883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
37e675f0cc9dce80acdaa323ed6b97d341da0e60
| 39
|
py
|
Python
|
src/masonite/broadcasting/drivers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 1,816
|
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
src/masonite/broadcasting/drivers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 340
|
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
src/masonite/broadcasting/drivers/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 144
|
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
from .PusherDriver import PusherDriver
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
530594499bbcc826b011d45e78cefc4f58b371f2
| 157
|
py
|
Python
|
import_library_depressive.py
|
suriya-1403/Depression-Prediction
|
1f45918a97874ac88bc5fd918e1ce664cb9f8876
|
[
"MIT"
] | 1
|
2021-04-10T19:10:19.000Z
|
2021-04-10T19:10:19.000Z
|
import_library_depressive.py
|
suriya-1403/Depression-Prediction
|
1f45918a97874ac88bc5fd918e1ce664cb9f8876
|
[
"MIT"
] | null | null | null |
import_library_depressive.py
|
suriya-1403/Depression-Prediction
|
1f45918a97874ac88bc5fd918e1ce664cb9f8876
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import re
from keras.models import Sequential
from keras.layers import Dense
from keras.preprocessing import sequence
| 22.428571
| 40
| 0.840764
| 25
| 157
| 5.28
| 0.6
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140127
| 157
| 6
| 41
| 26.166667
| 0.977778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5310312c558a95be49568d99f2eb796d703546c0
| 3,355
|
py
|
Python
|
Factories/CreateSignatureMatrices.py
|
dnzprmksz/Movie-Recommender
|
7543ee938b830cc7c74e75c02ffc2a1c7ce366f0
|
[
"MIT"
] | 1
|
2017-07-28T09:26:37.000Z
|
2017-07-28T09:26:37.000Z
|
Factories/CreateSignatureMatrices.py
|
dnzprmksz/movie-recommender
|
7543ee938b830cc7c74e75c02ffc2a1c7ce366f0
|
[
"MIT"
] | 2
|
2017-11-19T00:06:38.000Z
|
2017-11-19T00:06:41.000Z
|
Factories/CreateSignatureMatrices.py
|
dnzprmksz/movie-recommender
|
7543ee938b830cc7c74e75c02ffc2a1c7ce366f0
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import load
from scipy.sparse import csr_matrix, csc_matrix
# Min-hashing with random vectors. More vectors produce better approximation.
def generate_user_signature(num_vectors=120):
# Using normalized utility matrix, since similarity in normalized matrix is more important than vanilla.
loader = load("../Files/NormalizedUtilityMatrixCSR.npz")
utility_csr = csr_matrix((loader["data"], loader["indices"], loader["indptr"]), shape=loader["shape"])
# Get number of movies as the dimensions of the vector space.
dimension = utility_csr.shape[1]
num_users = utility_csr.shape[0]
# Generate random vectors with values between +1 and -1, and form a vertical matrix with them.
vectors = 2 * np.random.rand(dimension, num_vectors) - 1
# Initialize signature matrix with 0s.
signature = np.zeros((num_users, num_vectors))
# Apply Random Hyperplanes and Cosine Distance similarity algorithm on utility matrix.
for user_id in xrange(1, num_users):
rating = utility_csr[user_id]
descriptor = rating.dot(vectors)
signature[user_id] = descriptor
# Post process signature matrix to have only +1/-1 values.
for i in xrange(0, signature.shape[0]):
for j in xrange(0, signature.shape[1]):
if signature[i, j] == 0:
signature[i, j] = np.random.randint(2) # Convert 0s into 0 or 1. New 0s will be considered as negative.
# Label positives as +1 and negatives as -1.
signature[signature > 0] = 1
signature[signature <= 0] = -1
# Move signature matrix to a boolean matrix to compress the size.
compressed = np.full((num_users, num_vectors), False, dtype=bool)
compressed[signature == 1] = True
# Save signature matrix.
np.save("../Files/UserSignature", compressed)
# Min-hashing with random vectors. More vectors produce better approximation.
def generate_movie_signature(num_vectors=240):
# Using normalized utility matrix, since similarity in normalized matrix is more important than vanilla.
loader = load("../Files/NormalizedUtilityMatrixCSC.npz")
utility_csc = csc_matrix((loader["data"], loader["indices"], loader["indptr"]), shape=loader["shape"])
# Get number of users as the dimensions of the vector space.
dimension = utility_csc.shape[0]
num_movies = utility_csc.shape[1]
# Generate random vectors with values between +1 and -1, and form a vertical matrix with them.
vectors = 2 * np.random.rand(dimension, num_vectors) - 1
# Initialize signature matrix with 0s.
signature = np.zeros((num_movies, num_vectors), np.int8)
# Apply Random Hyperplanes and Cosine Distance similarity algorithm on utility matrix.
for movie_id in xrange(1, num_movies):
rating = np.transpose(utility_csc.getcol(movie_id))
descriptor = rating.dot(vectors)
signature[movie_id] = descriptor
# Post process signature matrix to have only +1/-1 values.
for i in xrange(0, signature.shape[0]):
for j in xrange(0, signature.shape[1]):
if signature[i, j] == 0:
signature[i, j] = np.random.randint(2) # Convert 0s into 0 or 1. New 0s will be considered as negative.
# Label positives as +1 and negatives as -1.
signature[signature > 0] = 1
signature[signature <= 0] = -1
# Move signature matrix to a boolean matrix to compress the size.
compressed = np.full((num_movies, num_vectors), False, dtype=bool)
# Save signature matrix.
np.save("../Files/MovieSignature", compressed)
| 40.421687
| 107
| 0.742772
| 497
| 3,355
| 4.937626
| 0.239437
| 0.0326
| 0.02771
| 0.02934
| 0.798696
| 0.767726
| 0.713121
| 0.713121
| 0.713121
| 0.674817
| 0
| 0.021119
| 0.153204
| 3,355
| 82
| 108
| 40.914634
| 0.842661
| 0.419076
| 0
| 0.380952
| 1
| 0
| 0.086708
| 0.063863
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.071429
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72a53f01e24766d4c5b73f60f7e8cedfa5a52937
| 33,955
|
py
|
Python
|
nova/tests/unit/network/test_rpcapi.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/network/test_rpcapi.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/network/test_rpcapi.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright 2013 Red Hat, Inc.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nUnit Tests for nova.network.rpcapi\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'collections'
newline|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'from'
name|'mox3'
name|'import'
name|'mox'
newline|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'context'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
name|'import'
name|'rpcapi'
name|'as'
name|'network_rpcapi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
name|'as'
name|'objects_base'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_instance'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_network'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|NetworkRpcAPITestCase
name|'class'
name|'NetworkRpcAPITestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|member|setUp
indent|' '
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'NetworkRpcAPITestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'multi_host'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
comment|'# Used to specify the default value expected if no real value is passed'
nl|'\n'
DECL|variable|DefaultArg
dedent|''
name|'DefaultArg'
op|'='
name|'collections'
op|'.'
name|'namedtuple'
op|'('
string|"'DefaultArg'"
op|','
op|'['
string|"'value'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_test_network_api
name|'def'
name|'_test_network_api'
op|'('
name|'self'
op|','
name|'method'
op|','
name|'rpc_method'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'RequestContext'
op|'('
string|"'fake_user'"
op|','
string|"'fake_project'"
op|')'
newline|'\n'
nl|'\n'
name|'rpcapi'
op|'='
name|'network_rpcapi'
op|'.'
name|'NetworkAPI'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNotNone'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'CONF'
op|'.'
name|'network_topic'
op|','
name|'rpcapi'
op|'.'
name|'client'
op|'.'
name|'target'
op|'.'
name|'topic'
op|')'
newline|'\n'
nl|'\n'
name|'expected_retval'
op|'='
string|"'foo'"
name|'if'
name|'rpc_method'
op|'=='
string|"'call'"
name|'else'
name|'None'
newline|'\n'
name|'expected_version'
op|'='
name|'kwargs'
op|'.'
name|'pop'
op|'('
string|"'version'"
op|','
name|'None'
op|')'
newline|'\n'
name|'expected_fanout'
op|'='
name|'kwargs'
op|'.'
name|'pop'
op|'('
string|"'fanout'"
op|','
name|'None'
op|')'
newline|'\n'
name|'expected_kwargs'
op|'='
name|'kwargs'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'for'
name|'k'
op|','
name|'v'
name|'in'
name|'expected_kwargs'
op|'.'
name|'items'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'isinstance'
op|'('
name|'v'
op|','
name|'self'
op|'.'
name|'DefaultArg'
op|')'
op|':'
newline|'\n'
indent|' '
name|'expected_kwargs'
op|'['
name|'k'
op|']'
op|'='
name|'v'
op|'.'
name|'value'
newline|'\n'
name|'kwargs'
op|'.'
name|'pop'
op|'('
name|'k'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'prepare_kwargs'
op|'='
op|'{'
op|'}'
newline|'\n'
name|'if'
name|'expected_version'
op|':'
newline|'\n'
indent|' '
name|'prepare_kwargs'
op|'['
string|"'version'"
op|']'
op|'='
name|'expected_version'
newline|'\n'
dedent|''
name|'if'
name|'expected_fanout'
op|':'
newline|'\n'
indent|' '
name|'prepare_kwargs'
op|'['
string|"'fanout'"
op|']'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
string|"'source_compute'"
name|'in'
name|'expected_kwargs'
op|':'
newline|'\n'
comment|'# Fix up for migrate_instance_* calls.'
nl|'\n'
indent|' '
name|'expected_kwargs'
op|'['
string|"'source'"
op|']'
op|'='
name|'expected_kwargs'
op|'.'
name|'pop'
op|'('
string|"'source_compute'"
op|')'
newline|'\n'
name|'expected_kwargs'
op|'['
string|"'dest'"
op|']'
op|'='
name|'expected_kwargs'
op|'.'
name|'pop'
op|'('
string|"'dest_compute'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'targeted_methods'
op|'='
op|'['
nl|'\n'
string|"'lease_fixed_ip'"
op|','
string|"'release_fixed_ip'"
op|','
string|"'rpc_setup_network_on_host'"
op|','
nl|'\n'
string|"'_rpc_allocate_fixed_ip'"
op|','
string|"'deallocate_fixed_ip'"
op|','
string|"'update_dns'"
op|','
nl|'\n'
string|"'_associate_floating_ip'"
op|','
string|"'_disassociate_floating_ip'"
op|','
nl|'\n'
string|"'lease_fixed_ip'"
op|','
string|"'release_fixed_ip'"
op|','
string|"'migrate_instance_start'"
op|','
nl|'\n'
string|"'migrate_instance_finish'"
op|','
nl|'\n'
string|"'allocate_for_instance'"
op|','
string|"'deallocate_for_instance'"
op|','
nl|'\n'
op|']'
newline|'\n'
name|'targeted_by_instance'
op|'='
op|'['
string|"'deallocate_for_instance'"
op|']'
newline|'\n'
name|'if'
name|'method'
name|'in'
name|'targeted_methods'
name|'and'
op|'('
string|"'host'"
name|'in'
name|'expected_kwargs'
name|'or'
nl|'\n'
string|"'instance'"
name|'in'
name|'expected_kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'method'
name|'in'
name|'targeted_by_instance'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'expected_kwargs'
op|'['
string|"'instance'"
op|']'
op|'['
string|"'host'"
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'expected_kwargs'
op|'['
string|"'host'"
op|']'
newline|'\n'
name|'if'
name|'method'
name|'not'
name|'in'
op|'['
string|"'allocate_for_instance'"
op|','
nl|'\n'
string|"'deallocate_fixed_ip'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'expected_kwargs'
op|'.'
name|'pop'
op|'('
string|"'host'"
op|')'
newline|'\n'
dedent|''
dedent|''
name|'if'
name|'CONF'
op|'.'
name|'multi_host'
op|':'
newline|'\n'
indent|' '
name|'prepare_kwargs'
op|'['
string|"'server'"
op|']'
op|'='
name|'host'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'rpcapi'
op|','
string|"'client'"
op|')'
newline|'\n'
nl|'\n'
name|'version_check'
op|'='
op|'['
nl|'\n'
string|"'deallocate_for_instance'"
op|','
string|"'deallocate_fixed_ip'"
op|','
nl|'\n'
string|"'allocate_for_instance'"
op|','
string|"'release_fixed_ip'"
op|','
string|"'set_network_host'"
op|','
nl|'\n'
string|"'setup_networks_on_host'"
nl|'\n'
op|']'
newline|'\n'
name|'if'
name|'method'
name|'in'
name|'version_check'
op|':'
newline|'\n'
indent|' '
name|'rpcapi'
op|'.'
name|'client'
op|'.'
name|'can_send_version'
op|'('
name|'mox'
op|'.'
name|'IgnoreArg'
op|'('
op|')'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'True'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'prepare_kwargs'
op|':'
newline|'\n'
indent|' '
name|'rpcapi'
op|'.'
name|'client'
op|'.'
name|'prepare'
op|'('
op|'**'
name|'prepare_kwargs'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'rpc_method'
op|'='
name|'getattr'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
name|'rpc_method'
op|')'
newline|'\n'
name|'rpc_method'
op|'('
name|'ctxt'
op|','
name|'method'
op|','
op|'**'
name|'expected_kwargs'
op|')'
op|'.'
name|'AndReturn'
op|'('
string|"'foo'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'retval'
op|'='
name|'getattr'
op|'('
name|'rpcapi'
op|','
name|'method'
op|')'
op|'('
name|'ctxt'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_retval'
op|','
name|'retval'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_networks
dedent|''
name|'def'
name|'test_create_networks'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'create_networks'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'arg1'
op|'='
string|"'arg'"
op|','
name|'arg2'
op|'='
string|"'arg'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_delete_network
dedent|''
name|'def'
name|'test_delete_network'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'delete_network'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'uuid'
op|'='
string|"'fake_uuid'"
op|','
name|'fixed_range'
op|'='
string|"'range'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_allocate_for_instance
dedent|''
name|'def'
name|'test_allocate_for_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'allocate_for_instance'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_id'
op|'='
string|"'fake_id'"
op|','
name|'project_id'
op|'='
string|"'fake_id'"
op|','
name|'host'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
name|'vpn'
op|'='
name|'False'
op|','
name|'requested_networks'
op|'='
op|'{'
op|'}'
op|','
nl|'\n'
name|'macs'
op|'='
op|'['
op|']'
op|','
name|'version'
op|'='
string|"'1.13'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_deallocate_for_instance
dedent|''
name|'def'
name|'test_deallocate_for_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_instance_obj'
op|'('
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'deallocate_for_instance'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'requested_networks'
op|'='
name|'self'
op|'.'
name|'DefaultArg'
op|'('
name|'None'
op|')'
op|','
name|'instance'
op|'='
name|'instance'
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.11'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_deallocate_for_instance_with_expected_networks
dedent|''
name|'def'
name|'test_deallocate_for_instance_with_expected_networks'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_instance_obj'
op|'('
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'deallocate_for_instance'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance'
op|'='
name|'instance'
op|','
name|'requested_networks'
op|'='
op|'{'
op|'}'
op|','
name|'version'
op|'='
string|"'1.11'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_add_fixed_ip_to_instance
dedent|''
name|'def'
name|'test_add_fixed_ip_to_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'add_fixed_ip_to_instance'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_id'
op|'='
string|"'fake_id'"
op|','
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'network_id'
op|'='
string|"'fake_id'"
op|','
name|'version'
op|'='
string|"'1.9'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_remove_fixed_ip_from_instance
dedent|''
name|'def'
name|'test_remove_fixed_ip_from_instance'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'remove_fixed_ip_from_instance'"
op|','
nl|'\n'
name|'rpc_method'
op|'='
string|"'call'"
op|','
name|'instance_id'
op|'='
string|"'fake_id'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
name|'host'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'fake_address'"
op|','
name|'version'
op|'='
string|"'1.9'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_add_network_to_project
dedent|''
name|'def'
name|'test_add_network_to_project'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'add_network_to_project'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'project_id'
op|'='
string|"'fake_id'"
op|','
name|'network_uuid'
op|'='
string|"'fake_uuid'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_instance_nw_info
dedent|''
name|'def'
name|'test_get_instance_nw_info'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'get_instance_nw_info'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_id'
op|'='
string|"'fake_id'"
op|','
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'project_id'
op|'='
string|"'fake_id'"
op|','
name|'version'
op|'='
string|"'1.9'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_validate_networks
dedent|''
name|'def'
name|'test_validate_networks'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'validate_networks'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'networks'
op|'='
op|'{'
op|'}'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_dns_domains
dedent|''
name|'def'
name|'test_get_dns_domains'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'get_dns_domains'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_add_dns_entry
dedent|''
name|'def'
name|'test_add_dns_entry'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'add_dns_entry'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'addr'"
op|','
name|'name'
op|'='
string|"'name'"
op|','
name|'dns_type'
op|'='
string|"'foo'"
op|','
name|'domain'
op|'='
string|"'domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_modify_dns_entry
dedent|''
name|'def'
name|'test_modify_dns_entry'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'modify_dns_entry'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'addr'"
op|','
name|'name'
op|'='
string|"'name'"
op|','
name|'domain'
op|'='
string|"'domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_delete_dns_entry
dedent|''
name|'def'
name|'test_delete_dns_entry'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'delete_dns_entry'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'name'
op|'='
string|"'name'"
op|','
name|'domain'
op|'='
string|"'domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_delete_dns_domain
dedent|''
name|'def'
name|'test_delete_dns_domain'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'delete_dns_domain'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'domain'
op|'='
string|"'fake_domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_dns_entries_by_address
dedent|''
name|'def'
name|'test_get_dns_entries_by_address'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'get_dns_entries_by_address'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'fake_address'"
op|','
name|'domain'
op|'='
string|"'fake_domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_dns_entries_by_name
dedent|''
name|'def'
name|'test_get_dns_entries_by_name'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'get_dns_entries_by_name'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'name'
op|'='
string|"'fake_name'"
op|','
name|'domain'
op|'='
string|"'fake_domain'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_private_dns_domain
dedent|''
name|'def'
name|'test_create_private_dns_domain'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'create_private_dns_domain'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'domain'
op|'='
string|"'fake_domain'"
op|','
name|'av_zone'
op|'='
string|"'fake_zone'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_public_dns_domain
dedent|''
name|'def'
name|'test_create_public_dns_domain'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'create_public_dns_domain'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'domain'
op|'='
string|"'fake_domain'"
op|','
name|'project'
op|'='
string|"'fake_project'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_setup_networks_on_host
dedent|''
name|'def'
name|'test_setup_networks_on_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'RequestContext'
op|'('
string|"'fake_user'"
op|','
string|"'fake_project'"
op|')'
newline|'\n'
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_instance_obj'
op|'('
name|'ctxt'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'setup_networks_on_host'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_id'
op|'='
name|'instance'
op|'.'
name|'id'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'teardown'
op|'='
name|'False'
op|','
nl|'\n'
name|'instance'
op|'='
name|'instance'
op|','
name|'version'
op|'='
string|"'1.16'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_setup_networks_on_host_v1_0
dedent|''
name|'def'
name|'test_setup_networks_on_host_v1_0'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'RequestContext'
op|'('
string|"'fake_user'"
op|','
string|"'fake_project'"
op|')'
newline|'\n'
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_instance_obj'
op|'('
name|'ctxt'
op|')'
newline|'\n'
name|'host'
op|'='
string|"'fake_host'"
newline|'\n'
name|'teardown'
op|'='
name|'True'
newline|'\n'
name|'rpcapi'
op|'='
name|'network_rpcapi'
op|'.'
name|'NetworkAPI'
op|'('
op|')'
newline|'\n'
name|'call_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
op|')'
newline|'\n'
name|'cctxt_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
name|'call'
op|'='
name|'call_mock'
op|')'
newline|'\n'
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'can_send_version'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'False'
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'prepare'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'cctxt_mock'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
nl|'\n'
name|'can_send_mock'
op|','
name|'prepare_mock'
nl|'\n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'rpcapi'
op|'.'
name|'setup_networks_on_host'
op|'('
name|'ctxt'
op|','
name|'instance'
op|'.'
name|'id'
op|','
name|'host'
op|','
name|'teardown'
op|','
nl|'\n'
name|'instance'
op|')'
newline|'\n'
comment|'# assert our mocks were called as expected'
nl|'\n'
dedent|''
name|'can_send_mock'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'1.16'"
op|')'
newline|'\n'
name|'prepare_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'version'
op|'='
string|"'1.0'"
op|')'
newline|'\n'
name|'call_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'ctxt'
op|','
string|"'setup_networks_on_host'"
op|','
nl|'\n'
name|'host'
op|'='
name|'host'
op|','
name|'teardown'
op|'='
name|'teardown'
op|','
nl|'\n'
name|'instance_id'
op|'='
name|'instance'
op|'.'
name|'id'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_lease_fixed_ip
dedent|''
name|'def'
name|'test_lease_fixed_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'lease_fixed_ip'"
op|','
name|'rpc_method'
op|'='
string|"'cast'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'address'
op|'='
string|"'fake_addr'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_release_fixed_ip
dedent|''
name|'def'
name|'test_release_fixed_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'release_fixed_ip'"
op|','
name|'rpc_method'
op|'='
string|"'cast'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'address'
op|'='
string|"'fake_addr'"
op|','
name|'mac'
op|'='
string|"'fake_mac'"
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.14'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_release_fixed_ip_no_mac_support
dedent|''
name|'def'
name|'test_release_fixed_ip_no_mac_support'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|"# Tests that the mac kwarg is not passed when we can't send version"
nl|'\n'
comment|'# 1.14 to the network manager.'
nl|'\n'
indent|' '
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'RequestContext'
op|'('
string|"'fake_user'"
op|','
string|"'fake_project'"
op|')'
newline|'\n'
name|'address'
op|'='
string|"'192.168.65.158'"
newline|'\n'
name|'host'
op|'='
string|"'fake-host'"
newline|'\n'
name|'mac'
op|'='
string|"'00:0c:29:2c:b2:64'"
newline|'\n'
name|'rpcapi'
op|'='
name|'network_rpcapi'
op|'.'
name|'NetworkAPI'
op|'('
op|')'
newline|'\n'
name|'cast_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
op|')'
newline|'\n'
name|'cctxt_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
name|'cast'
op|'='
name|'cast_mock'
op|')'
newline|'\n'
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'can_send_version'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'False'
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'prepare'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'cctxt_mock'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
nl|'\n'
name|'can_send_mock'
op|','
name|'prepare_mock'
nl|'\n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'rpcapi'
op|'.'
name|'release_fixed_ip'
op|'('
name|'ctxt'
op|','
name|'address'
op|','
name|'host'
op|','
name|'mac'
op|')'
newline|'\n'
comment|'# assert our mocks were called as expected 232'
nl|'\n'
dedent|''
name|'can_send_mock'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'1.14'"
op|')'
newline|'\n'
name|'prepare_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'server'
op|'='
name|'host'
op|','
name|'version'
op|'='
string|"'1.0'"
op|')'
newline|'\n'
name|'cast_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'ctxt'
op|','
string|"'release_fixed_ip'"
op|','
nl|'\n'
name|'address'
op|'='
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_network_host
dedent|''
name|'def'
name|'test_set_network_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network'
op|'='
name|'fake_network'
op|'.'
name|'fake_network_obj'
op|'('
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'set_network_host'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'network_ref'
op|'='
name|'network'
op|','
name|'version'
op|'='
string|"'1.15'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_network_host_network_object_to_primitive
dedent|''
name|'def'
name|'test_set_network_host_network_object_to_primitive'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Tests that the network object is converted to a primitive if it'
nl|'\n'
comment|"# can't send version 1.15."
nl|'\n'
indent|' '
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'RequestContext'
op|'('
string|"'fake_user'"
op|','
string|"'fake_project'"
op|')'
newline|'\n'
name|'network'
op|'='
name|'fake_network'
op|'.'
name|'fake_network_obj'
op|'('
name|'ctxt'
op|')'
newline|'\n'
name|'network_dict'
op|'='
name|'objects_base'
op|'.'
name|'obj_to_primitive'
op|'('
name|'network'
op|')'
newline|'\n'
name|'rpcapi'
op|'='
name|'network_rpcapi'
op|'.'
name|'NetworkAPI'
op|'('
op|')'
newline|'\n'
name|'call_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
op|')'
newline|'\n'
name|'cctxt_mock'
op|'='
name|'mock'
op|'.'
name|'Mock'
op|'('
name|'call'
op|'='
name|'call_mock'
op|')'
newline|'\n'
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'can_send_version'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'False'
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'rpcapi'
op|'.'
name|'client'
op|','
string|"'prepare'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'cctxt_mock'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
nl|'\n'
name|'can_send_mock'
op|','
name|'prepare_mock'
nl|'\n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'rpcapi'
op|'.'
name|'set_network_host'
op|'('
name|'ctxt'
op|','
name|'network'
op|')'
newline|'\n'
comment|'# assert our mocks were called as expected'
nl|'\n'
dedent|''
name|'can_send_mock'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'1.15'"
op|')'
newline|'\n'
name|'prepare_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'version'
op|'='
string|"'1.0'"
op|')'
newline|'\n'
name|'call_mock'
op|'.'
name|'assert_called_once_with'
op|'('
name|'ctxt'
op|','
string|"'set_network_host'"
op|','
nl|'\n'
name|'network_ref'
op|'='
name|'network_dict'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rpc_setup_network_on_host
dedent|''
name|'def'
name|'test_rpc_setup_network_on_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'rpc_setup_network_on_host'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'network_id'
op|'='
string|"'fake_id'"
op|','
name|'teardown'
op|'='
name|'False'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rpc_allocate_fixed_ip
dedent|''
name|'def'
name|'test_rpc_allocate_fixed_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'_rpc_allocate_fixed_ip'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_id'
op|'='
string|"'fake_id'"
op|','
name|'network_id'
op|'='
string|"'fake_id'"
op|','
name|'address'
op|'='
string|"'addr'"
op|','
nl|'\n'
name|'vpn'
op|'='
name|'True'
op|','
name|'host'
op|'='
string|"'fake_host'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_deallocate_fixed_ip
dedent|''
name|'def'
name|'test_deallocate_fixed_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_db_instance'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'deallocate_fixed_ip'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'fake_addr'"
op|','
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'instance'
op|'='
name|'instance'
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.12'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_dns
dedent|''
name|'def'
name|'test_update_dns'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'update_dns'"
op|','
name|'rpc_method'
op|'='
string|"'cast'"
op|','
name|'fanout'
op|'='
name|'True'
op|','
nl|'\n'
name|'network_ids'
op|'='
string|"'fake_id'"
op|','
name|'version'
op|'='
string|"'1.3'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test__associate_floating_ip
dedent|''
name|'def'
name|'test__associate_floating_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'_associate_floating_ip'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'floating_address'
op|'='
string|"'fake_addr'"
op|','
name|'fixed_address'
op|'='
string|"'fixed_address'"
op|','
nl|'\n'
name|'interface'
op|'='
string|"'fake_interface'"
op|','
name|'host'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake_uuid'"
op|','
name|'version'
op|'='
string|"'1.6'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test__disassociate_floating_ip
dedent|''
name|'def'
name|'test__disassociate_floating_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'_disassociate_floating_ip'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'address'
op|'='
string|"'fake_addr'"
op|','
name|'interface'
op|'='
string|"'fake_interface'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
name|'instance_uuid'
op|'='
string|"'fake_uuid'"
op|','
name|'version'
op|'='
string|"'1.6'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_migrate_instance_start
dedent|''
name|'def'
name|'test_migrate_instance_start'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'migrate_instance_start'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake_instance_uuid'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'project_id'
op|'='
string|"'fake_project'"
op|','
nl|'\n'
name|'source_compute'
op|'='
string|"'fake_src_compute'"
op|','
nl|'\n'
name|'dest_compute'
op|'='
string|"'fake_dest_compute'"
op|','
nl|'\n'
name|'floating_addresses'
op|'='
string|"'fake_floating_addresses'"
op|','
nl|'\n'
name|'host'
op|'='
name|'self'
op|'.'
name|'DefaultArg'
op|'('
name|'None'
op|')'
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.2'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_migrate_instance_start_multi_host
dedent|''
name|'def'
name|'test_migrate_instance_start_multi_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'migrate_instance_start'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake_instance_uuid'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'project_id'
op|'='
string|"'fake_project'"
op|','
nl|'\n'
name|'source_compute'
op|'='
string|"'fake_src_compute'"
op|','
nl|'\n'
name|'dest_compute'
op|'='
string|"'fake_dest_compute'"
op|','
nl|'\n'
name|'floating_addresses'
op|'='
string|"'fake_floating_addresses'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.2'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_migrate_instance_finish
dedent|''
name|'def'
name|'test_migrate_instance_finish'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'migrate_instance_finish'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake_instance_uuid'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'project_id'
op|'='
string|"'fake_project'"
op|','
nl|'\n'
name|'source_compute'
op|'='
string|"'fake_src_compute'"
op|','
nl|'\n'
name|'dest_compute'
op|'='
string|"'fake_dest_compute'"
op|','
nl|'\n'
name|'floating_addresses'
op|'='
string|"'fake_floating_addresses'"
op|','
nl|'\n'
name|'host'
op|'='
name|'self'
op|'.'
name|'DefaultArg'
op|'('
name|'None'
op|')'
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.2'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_migrate_instance_finish_multi_host
dedent|''
name|'def'
name|'test_migrate_instance_finish_multi_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_test_network_api'
op|'('
string|"'migrate_instance_finish'"
op|','
name|'rpc_method'
op|'='
string|"'call'"
op|','
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake_instance_uuid'"
op|','
nl|'\n'
name|'rxtx_factor'
op|'='
string|"'fake_factor'"
op|','
nl|'\n'
name|'project_id'
op|'='
string|"'fake_project'"
op|','
nl|'\n'
name|'source_compute'
op|'='
string|"'fake_src_compute'"
op|','
nl|'\n'
name|'dest_compute'
op|'='
string|"'fake_dest_compute'"
op|','
nl|'\n'
name|'floating_addresses'
op|'='
string|"'fake_floating_addresses'"
op|','
nl|'\n'
name|'host'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'version'
op|'='
string|"'1.2'"
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 13.222352
| 88
| 0.620233
| 5,083
| 33,955
| 3.968719
| 0.052922
| 0.121648
| 0.073365
| 0.041937
| 0.876766
| 0.832102
| 0.770535
| 0.713478
| 0.644376
| 0.59649
| 0
| 0.003259
| 0.096451
| 33,955
| 2,567
| 89
| 13.227503
| 0.65427
| 0
| 0
| 0.929879
| 0
| 0
| 0.390664
| 0.064998
| 0
| 0
| 0
| 0
| 0.005843
| 0
| null | null | 0.000779
| 0.003896
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72ac0e29779b09b736990cedf9cec62695d16e21
| 21
|
py
|
Python
|
marketing_attribution_models/__init__.py
|
andretocci/Marketing-Attribution-Models
|
01e1b2de609515720a554b213070c1bc8ed24198
|
[
"Apache-2.0"
] | 126
|
2020-01-27T14:02:46.000Z
|
2022-03-30T20:50:47.000Z
|
marketing_attribution_models/__init__.py
|
andretocci/Marketing-Attribution-Models
|
01e1b2de609515720a554b213070c1bc8ed24198
|
[
"Apache-2.0"
] | 24
|
2020-05-23T12:11:55.000Z
|
2022-03-14T11:46:26.000Z
|
marketing_attribution_models/__init__.py
|
iago/Marketing-Attribution-Models
|
6d5e8ba879f0c4157a6c7795df991a1d5964cdc2
|
[
"Apache-2.0"
] | 40
|
2020-02-03T17:44:27.000Z
|
2022-03-15T16:20:43.000Z
|
from .MAM import MAM
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72dac89d8eee5f20f8ddb5c0bbea07080ecca3b7
| 26,172
|
py
|
Python
|
src/module_particle_systems.py
|
faycalki/medieval-conquests
|
113e13e2b166b79517c14f2c13f7561307a89f75
|
[
"MIT"
] | 7
|
2019-08-11T14:20:20.000Z
|
2021-11-21T06:48:24.000Z
|
src/module_particle_systems.py
|
faycalki/medieval-conquests
|
113e13e2b166b79517c14f2c13f7561307a89f75
|
[
"MIT"
] | null | null | null |
src/module_particle_systems.py
|
faycalki/medieval-conquests
|
113e13e2b166b79517c14f2c13f7561307a89f75
|
[
"MIT"
] | null | null | null |
from header_particle_systems import *
#psf_always_emit = 0x0000000002
#psf_global_emit_dir = 0x0000000010
#psf_emit_at_water_level = 0x0000000020
#psf_billboard_2d = 0x0000000100 # up_vec = dir, front rotated towards camera
#psf_billboard_3d = 0x0000000200 # front_vec point to camera.
#psf_turn_to_velocity = 0x0000000400
#psf_randomize_rotation = 0x0000001000
#psf_randomize_size = 0x0000002000
#psf_2d_turbulance = 0x0000010000
####################################################################################################################
# Each particle system contains the following fields:
#
# 1) Particle system id (string): used for referencing particle systems in other files.
# The prefix psys_ is automatically added before each particle system id.
# 2) Particle system flags (int). See header_particle_systems.py for a list of available flags
# 3) mesh-name.
####
# 4) Num particles per second: Number of particles emitted per second.
# 5) Particle Life: Each particle lives this long (in seconds).
# 6) Damping: How much particle's speed is lost due to friction.
# 7) Gravity strength: Effect of gravity. (Negative values make the particles float upwards.)
# 8) Turbulance size: Size of random turbulance (in meters)
# 9) Turbulance strength: How much a particle is affected by turbulance.
####
# 10,11) Alpha keys : Each attribute is controlled by two keys and
# 12,13) Red keys : each key has two fields: (time, magnitude)
# 14,15) Green keys : For example scale key (0.3,0.6) means
# 16,17) Blue keys : scale of each particle will be 0.6 at the
# 18,19) Scale keys : time 0.3 (where time=0 means creation and time=1 means end of the particle)
#
# The magnitudes are interpolated in between the two keys and remain constant beyond the keys.
# Except the alpha always starts from 0 at time 0.
####
# 20) Emit Box Size : The dimension of the box particles are emitted from.
# 21) Emit velocity : Particles are initially shot with this velocity.
# 22) Emit dir randomness
# 23) Particle rotation speed: Particles start to rotate with this (angular) speed (degrees per second).
# 24) Particle rotation damping: How quickly particles stop their rotation
####################################################################################################################
particle_systems = [
("game_rain", psf_always_emit|psf_global_emit_dir|psf_billboard_2d, "prtcl_rain",
500.0, 0.5, 0.33, 1.0, 10.0, 0.0,
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(8.2, 8.2, 0.2),
(0.0, 0.0, -10.0),
0.0,
0,
0.5
),
("game_snow", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_snow_fall_1",
150.0, 2.0, 0.2, 0.1, 30.0, 20.0,
(0.2, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(10.0, 10.0, 0.5),
(0.0, 0.0, -5.0),
1.0,
200,
0.5
),
# Required:
("game_blood", psf_billboard_3d |psf_randomize_size|psf_randomize_rotation, "prt_mesh_blood_1",
5000, 5.65, 3, 0.9, 10, 10, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0, 1), (1, 1), #alpha keys
(0.1, 0.9), (1, 0.9), #red keys
(0.1, 0.7), (1, 0.7), #green keys
(0.1, 0.7), (1, 0.7), #blue keys
(0.1, 0.02), (1, 0.15), #scale keys
(0, 0.05, 0), #emit box size
(0.6, 1.1, 1.2), #emit velocity
0, #emit dir randomness
0, #rotation speed
0, #rotation damping
),
("game_blood_2", psf_billboard_3d | psf_randomize_size|psf_randomize_rotation , "prt_mesh_blood_3",
5000, 0.8, 3, 1.1, 10, 10, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.6), (1, 0.01), #alpha keys
(0.1, 0.5), (1, 0.7), #red keys
(0.1, 0.5), (1, 0.7), #green keys
(0.1, 0.5), (1, 0.7), #blue keys
(0.1, 0.1), (1, 0.75), #scale keys
(0, 0.05, 0), #emit box size
(0.9, 0.1, 0.2 ), #emit velocity
0.9, #emit dir randomness
0, #rotation speed
0, #rotation damping
),
("game_hoof_dust", psf_billboard_3d|psf_randomize_rotation|psf_randomize_size|psf_2d_turbulance, "prt_mesh_dust_1",
5.0, 2.0, 10.0, 0.05, 10.0, 39.0,
(0.2, 0.5), (1.0, 0.0),
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.9), (1.0, 0.9),
(0.0, 0.78), (1.0, 0.78),
(0.0, 2.0), (1.0, 3.5),
(0.2, 0.3, 0.2),
(0.0, 0.0, 3.9),
0.5,
130,
0.5
),
# ("game_hoof_dust_snow", psf_billboard_3d|psf_randomize_size, "prt_mesh_snow_dust_1",
# 6.0, 2.0, 3.5, 1.0, 10.0, 0.0,
# (0.2, 1.0), (1.0, 1.0),
# (0.0, 1.0), (1.0, 1.0),
# (0.0, 1.0), (1.0, 1.0),
# (0.0, 1.0), (1.0, 1.0),
# (0.5, 4.0), (1.0, 5.7),
# (0.2, 1.0, 0.1),
# (0.0, 0.0, 1.0),
# 2.0,
# 0,
# 0.0
# ),
("game_hoof_dust_mud", psf_billboard_2d|psf_randomize_rotation|psf_randomize_size|psf_2d_turbulance, "prt_mesh_dust_1",
5.0, 0.7, 10.0, 3.0, 0.0, 0.0,
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.7), (1.0, 0.7),
(0.0, 0.6), (1.0, 0.6),
(0.0, 0.4), (1.0, 0.4),
(0.0, 0.2), (1.0, 0.22),
(0.15, 0.5, 0.1),
(0.0, 0.0, 15.0),
6.0,
200,
0.5
),
("game_water_splash_1", psf_emit_at_water_level|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prtcl_drop",
20.0, 0.85, 0.25, 0.9, 10.0, 0.0,
(0.3, 0.5), (1.0, 0.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 0.3), (1.0, 0.18),
(0.3, 0.2, 0.1),
(0.0, 1.2, 2.3),
0.3,
50,
0.5
),
("game_water_splash_2", psf_emit_at_water_level|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prtcl_splash_b",
30.0, 0.4, 0.7, 0.5, 10.0, 0.0,
(0.3, 1.0), (1.0, 0.3),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 0.25), (1.0, 0.7),
(0.4, 0.3, 0.1),
(0.0, 1.3, 1.1),
0.1,
50,
0.5
),
("game_water_splash_3", psf_emit_at_water_level, "prt_mesh_water_wave_1",
5.0, 2.0, 0.0, 0.0, 10.0, 0.0,
(0.03, 0.2), (1.0, 0.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 3.0), (1.0, 10.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
0.0,
0,
0.5
),
("torch_fire", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
50.0, 0.35, 0.2, 0.03, 10.0, 0.0,
(0.5, 0.8), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.7), (1.0, 0.3),
(0.5, 0.2), (1.0, 0.0),
(0.0, 0.15), (0.4, 0.3),
(0.04, 0.04, 0.01),
(0.0, 0.0, 0.5),
0.0,
200,
0.5
),
("fire_glow_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prt_mesh_fire_2",
2.0, 0.55, 0.2, 0.0, 10.0, 0.0,
(0.5, 0.9), (1.0, 0.0),
(0.0, 0.9), (1.0, 0.9),
(0.0, 0.7), (1.0, 0.7),
(0.0, 0.4), (1.0, 0.4),
(0.0, 2.0), (1.0, 2.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
0.0,
0,
0.0
),
("fire_glow_fixed", psf_global_emit_dir|psf_billboard_3d, "prt_mesh_fire_2",
4.0, 100.0, 0.2, 0.0, 10.0, 0.0,
(-0.01, 1.0), (1.0, 1.0),
(0.0, 0.9), (1.0, 0.9),
(0.0, 0.7), (1.0, 0.7),
(0.0, 0.4), (1.0, 0.4),
(0.0, 2.0), (1.0, 2.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
0.0,
0,
0.0
),
("torch_smoke", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prtcl_dust_a",
15.0, 0.5, 0.2, -0.2, 10.0, 0.1,
(0.5, 0.25), (1.0, 0.0),
(0.0, 0.2), (1.0, 0.1),
(0.0, 0.2), (1.0, 0.09),
(0.0, 0.2), (1.0, 0.08),
(0.0, 0.5), (0.8, 2.5),
(0.1, 0.1, 0.1),
(0.0, 0.0, 1.5),
0.1,
0,
0.0
),
("flue_smoke_short", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prtcl_dust_a",
15.0, 1.5, 0.1, -0.0, 10.0, 12.0,
(0.0, 0.3), (1.0, 0.0),
(0.0, 0.2), (1.0, 0.1),
(0.0, 0.2), (1.0, 0.09),
(0.0, 0.2), (1.0, 0.08),
(0.0, 1.5), (1.0, 7.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 1.5),
0.1,
150,
0.8
),
("flue_smoke_tall", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prtcl_dust_a",
15.0, 3.0, 0.5, -0.0, 15.0, 12.0,
(0.0, 0.35), (1.0, 0.0),
(0.0, 0.3), (1.0, 0.1),
(0.0, 0.3), (1.0, 0.1),
(0.0, 0.3), (1.0, 0.1),
(0.0, 2.0), (1.0, 7.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 1.5),
0.1,
150,
0.5
),
("war_smoke_tall", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
5.0, 12.0, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 2.2), (1.0, 15.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 2.2),
0.1,
100,
0.2
),
("ladder_dust_6m", psf_billboard_3d, "prt_mesh_smoke_1",
700.0, 0.9, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 2.0),
(0.75, 0.75, 3.5),
(0.0, 0.0, 0.0),
0.1,
100,
0.2
),
("ladder_dust_8m", psf_billboard_3d, "prt_mesh_smoke_1",
900.0, 0.9, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 2.0),
(0.75, 0.75, 4.5),
(0.0, 0.0, 0.0),
0.1,
100,
0.2
),
("ladder_dust_10m", psf_billboard_3d, "prt_mesh_smoke_1",
1100.0, 0.9, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 2.0),
(0.75, 0.75, 5.5),
(0.0, 0.0, 0.0),
0.1,
100,
0.2
),
("ladder_dust_12m", psf_billboard_3d, "prt_mesh_smoke_1",
1300.0, 0.9, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 2.0),
(0.75, 0.75, 6.5),
(0.0, 0.0, 0.0),
0.1,
100,
0.2
),
("ladder_dust_14m", psf_billboard_3d, "prt_mesh_smoke_1",
1500.0, 0.9, 0.0, 0.0, 7.0, 7.0,
(0.0, 0.25), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 0.8),
(0.0, 1.0), (1.0, 2.0),
(0.75, 0.75, 7.5),
(0.0, 0.0, 0.0),
0.1,
100,
0.2
),
("ladder_straw_6m", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
700.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.75, 0.75, 3.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("ladder_straw_8m", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
900.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.75, 0.75, 4.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("ladder_straw_10m", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
1100.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.75, 0.75, 5.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("ladder_straw_12m", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
1300.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.75, 0.75, 6.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("ladder_straw_14m", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
1500.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.75, 0.75, 7.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("torch_fire_sparks", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_size, "prt_sparks_mesh_1",
10.0, 0.7, 0.2, 0.0, 10.0, 0.02,
(0.66, 1.0), (1.0, 0.0),
(0.1, 0.7), (1.0, 0.7),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.1), (1.0, 0.1),
(0.1, 0.05), (1.0, 0.05),
(0.1, 0.1, 0.1),
(0.0, 0.0, 0.9),
0.0,
0,
0.0
),
("fire_sparks_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_size, "prt_sparks_mesh_1",
10.0, 1.5, 0.2, 0.0, 3.0, 10.0,
(0.6, 1.0), (1.0, 1.0),
(0.1, 0.7), (1.0, 0.7),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.1), (1.0, 0.1),
(0.1, 0.07), (1.0, 0.03),
(0.17, 0.17, 0.01),
(0.0, 0.0, 1.0),
0.0,
0,
0.0
),
("pistol_smoke", psf_billboard_3d, "prtcl_dust_a",
90.0, 2.5, 0.6, -0.2, 60.0, 1.5,
(0.0, 0.75), (1.0, 0.0),
(0.0, 0.7), (1.0, 0.4),
(0.0, 0.7), (1.0, 0.4),
(0.0, 0.7), (1.0, 0.4),
(0.0, 1.5), (0.5, 11.0),
(0.1, 0.1, 0.1),
(2.0, 2.0, 0.0),
0.1,
0,
0.0
),
("brazier_fire_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
25.0, 0.5, 0.1, 0.0, 10.0, 0.0,
(0.5, 0.4), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.7), (1.0, 0.3),
(0.5, 0.2), (1.0, 0.0),
(0.1, 0.2), (1.0, 0.5),
(0.1, 0.1, 0.01),
(0.0, 0.0, 0.4),
0.0,
100,
0.2
),
("cooking_fire_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
25.0, 0.35, 0.1, 0.03, 10.0, 0.0,
(0.5, 0.8), (1.0, 0.0),
(0.5, 0.5), (1.0, 0.27),
(0.5, 0.35), (1.0, 0.09),
(0.5, 0.1), (1.0, 0.0),
(0.1, 0.5), (1.0, 1.0),
(0.05, 0.05, 0.01),
(0.0, 0.0, 1.0),
0.0,
200,
0.0
),
("cooking_smoke", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
4.0, 4.0, 0.1, 0.0, 3.0, 5.0,
(0.2, 0.2), (1.0, 0.0),
(0.0, 0.8), (1.0, 1.0),
(0.0, 0.8), (1.0, 1.0),
(0.0, 0.85), (1.0, 1.0),
(0.0, 0.65), (1.0, 3.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 1.2),
0.0,
0,
0.0
),
("food_steam", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_steam_1",
3.0, 1.0, 0.0, 0.0, 8.0, 1.0,
(0.5, 0.1), (1.0, 0.0),
(0.0, 1.0), (1.0, 0.1),
(0.0, 1.0), (1.0, 0.1),
(0.0, 1.0), (1.0, 0.1),
(0.0, 0.2), (0.9, 0.5),
(0.05, 0.05, 0.0),
(0.0, 0.0, 0.1),
0.0,
100,
0.5
),
("candle_light", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prt_mesh_candle_fire_1",
7.0, 1.1, 0.6, -0.0, 10.0, 0.2,
(0.1, 0.5), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.6), (1.0, 0.1),
(0.5, 0.2), (1.0, 0.0),
(0.3, 0.2), (1.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.09),
0.0,
0,
0.0
),
("candle_light_small", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prt_mesh_candle_fire_1",
4.0, 1.1, 0.6, -0.0, 10.0, 0.2,
(0.1, 0.8), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.6), (1.0, 0.1),
(0.5, 0.2), (1.0, 0.0),
(0.3, 0.13), (1.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.06),
0.0,
0,
0.0
),
("lamp_fire", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
10.0, 0.8, 0.6, -0.0, 10.0, 0.4,
(0.1, 0.5), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.8), (1.0, 0.1),
(0.5, 0.4), (1.0, 0.0),
(0.3, 0.35), (0.9, 0.5),
(0.01, 0.01, 0.0),
(0.0, 0.0, 0.35),
0.03,
100,
0.5
),
("dummy_smoke", psf_billboard_3d|psf_randomize_size, "prt_mesh_dust_1",
500.0, 3.0, 15.0, -0.05, 10.0, 0.2,
(0.1, 0.5), (1.0, 0.0),
(0.1, 0.8), (1.0, 0.8),
(0.1, 0.7), (1.0, 0.7),
(0.1, 0.6), (1.0, 0.7),
(0.0, 0.7), (1.0, 2.2),
(0.2, 0.2, 0.5),
(0.0, 0.0, 0.05),
2.0,
10,
0.1
),
#Dummy straw was moved from here patched to an extent
("dummy_smoke_big", psf_billboard_3d|psf_randomize_size, "prt_mesh_dust_1",
500.0, 9.0, 15.0, -0.05, 10.0, 0.2,
(0.1, 0.9), (1.0, 0.0),
(0.1, 0.8), (1.0, 0.8),
(0.1, 0.7), (1.0, 0.7),
(0.1, 0.6), (1.0, 0.7),
(0.0, 5.0), (1.0, 15.0),
(3.0, 3.0, 5.0),
(0.0, 0.0, 0.05),
2.0,
10,
0.1
),
#Dummy straw big was moved from here patched to an extent
("gourd_smoke", psf_billboard_3d|psf_randomize_size, "prt_mesh_dust_1",
500.0, 3.0, 15.0, -0.05, 10.0, 0.2,
(0.1, 0.5), (1.0, 0.0),
(0.1, 0.8), (1.0, 0.8),
(0.1, 0.7), (1.0, 0.7),
(0.1, 0.6), (1.0, 0.7),
(0.0, 0.5), (1.0, 1.0),
(0.2, 0.2, 0.5),
(0.0, 0.0, 0.05),
2.0,
10,
0.1
),
("gourd_piece_1", psf_randomize_rotation, "prt_gourd_piece_1",
15.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 1.0), (1.0, 1.0),
(0.2, 0.2, 0.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("gourd_piece_2", psf_randomize_rotation|psf_randomize_size, "prt_gourd_piece_2",
50.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 1.0), (1.0, 1.0),
(0.2, 0.2, 0.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("fire_fly_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prt_sparks_mesh_1",
2.0, 5.0, 1.2, 0.0, 50.0, 7.0,
(0.1, 0.8), (1.0, 0.2),
(0.5, 0.7), (1.0, 0.7),
(0.5, 0.8), (1.0, 0.8),
(0.5, 1.0), (1.0, 1.0),
(0.0, 0.1), (1.0, 0.1),
(20.0, 20.0, 0.5),
(0.0, 0.0, 0.0),
5.0,
0,
0.0
),
("bug_fly_1", psf_always_emit|psf_billboard_2d, "prt_mesh_rose_a",
20.0, 8.0, 0.02, 0.025, 1.0, 5.0,
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.25), (1.0, 0.25),
(10.0, 5.0, 0.1),
(0.0, 0.0, -0.9),
0.01,
10,
0.0
),
("moon_beam_1", psf_always_emit|psf_global_emit_dir|psf_billboard_2d|psf_randomize_size, "prt_mesh_moon_beam",
2.0, 4.0, 1.2, 0.0, 0.0, 0.0,
(0.5, 1.0), (1.0, 0.0),
(0.0, 0.4), (1.0, 0.4),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.6), (1.0, 0.6),
(0.0, 2.0), (1.0, 2.2),
(1.0, 1.0, 0.2),
(0.0, 0.0, -2.0),
0.0,
100,
0.5
),
("moon_beam_paricle_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_size, "prt_sparks_mesh_1",
10.0, 1.5, 1.5, 0.0, 10.0, 10.0,
(0.5, 1.0), (1.0, 0.0),
(0.5, 0.5), (1.0, 0.5),
(0.5, 0.7), (1.0, 0.7),
(0.5, 1.0), (1.0, 1.0),
(0.0, 0.1), (1.0, 0.1),
(1.0, 1.0, 4.0),
(0.0, 0.0, 0.0),
0.5,
0,
0.0
),
("night_smoke_1", psf_always_emit|psf_global_emit_dir|psf_billboard_3d, "prt_mesh_dust_1",
5.0, 10.0, 1.5, 0.0, 50.0, 2.0,
(0.3, 0.1), (1.0, 0.0),
(0.5, 0.5), (1.0, 0.5),
(0.5, 0.5), (1.0, 0.5),
(0.5, 0.5), (1.0, 0.6),
(0.0, 10.0), (1.0, 10.0),
(25.0, 25.0, 0.5),
(0.0, 1.0, 0.0),
2.0,
20,
1.0
),
("fireplace_fire_small", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
25.0, 0.8, 0.2, -0.1, 10.0, 0.0,
(0.5, 0.5), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.7), (1.0, 0.3),
(0.5, 0.2), (1.0, 0.0),
(0.0, 0.2), (1.0, 0.7),
(0.2, 0.1, 0.01),
(0.0, 0.0, 0.2),
0.1,
100,
0.5
),
("fireplace_fire_big", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
35.0, 0.6, 0.2, -0.2, 10.0, 0.0,
(0.5, 0.5), (1.0, 0.0),
(0.5, 1.0), (1.0, 0.9),
(0.5, 0.7), (1.0, 0.3),
(0.5, 0.2), (1.0, 0.0),
(0.0, 0.4), (1.0, 1.0),
(0.4, 0.2, 0.01),
(0.0, 0.0, 0.4),
0.1,
100,
0.5
),
("village_fire_big", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
50.0, 1.0, 0.0, -1.2, 25.0, 10.0,
(0.2, 0.7), (1.0, 0.0),
(0.2, 1.0), (1.0, 0.9),
(0.2, 0.7), (1.0, 0.3),
(0.2, 0.2), (1.0, 0.0),
(0.0, 2.0), (1.0, 6.0),
(2.2, 2.2, 0.2),
(0.0, 0.0, 0.0),
0.0,
250,
0.3
),
("village_fire_smoke_big", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
30.0, 2.0, 0.3, -1.0, 50.0, 10.0,
(0.5, 0.15), (1.0, 0.0),
(0.2, 0.4), (1.0, 0.2),
(0.2, 0.4), (1.0, 0.2),
(0.2, 0.4), (1.0, 0.2),
(0.0, 6.0), (1.0, 8.0),
(2.0, 2.0, 1.0),
(0.0, 0.0, 5.0),
0.0,
0,
0.1
),
("map_village_fire", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_fire_1",
20.0, 1.0, 0.0, -0.2, 3.0, 3.0,
(0.2, 0.7), (1.0, 0.0),
(0.2, 1.0), (1.0, 0.9),
(0.2, 0.7), (1.0, 0.3),
(0.2, 0.2), (1.0, 0.0),
(0.0, 0.15), (1.0, 0.45),
(0.2, 0.2, 0.02),
(0.0, 0.0, 0.0),
0.0,
250,
0.3
),
("map_village_fire_smoke", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
25.0, 2.5, 0.3, -0.15, 3.0, 3.0,
(0.5, 0.15), (1.0, 0.0),
(0.2, 0.4), (1.0, 0.3),
(0.2, 0.4), (1.0, 0.3),
(0.2, 0.4), (1.0, 0.3),
(0.0, 0.6), (1.0, 0.9),
(0.2, 0.2, 0.1),
(0.0, 0.0, 0.03),
0.0,
0,
0.1
),
("map_village_looted_smoke", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
20.0, 3.0, 0.3, -0.11, 3.0, 2.0,
(0.5, 0.15), (1.0, 0.0),
(0.2, 0.5), (1.0, 0.5),
(0.2, 0.5), (1.0, 0.5),
(0.2, 0.5), (1.0, 0.5),
(0.0, 0.7), (1.0, 1.3),
(0.2, 0.2, 0.1),
(0.0, 0.0, 0.015),
0.0,
0,
0.1
),
("dungeon_water_drops", psf_always_emit|psf_global_emit_dir|psf_billboard_2d, "prtcl_rain",
1.0, 1.0, 0.33, 0.8, 0.0, 0.0,
(1.0, 0.2), (1.0, 0.2),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 0.8), (1.0, 0.8),
(0.05, 0.05, 0.5),
(0.0, 0.0, -5.0),
0.0,
0,
0.0
),
("wedding_rose", psf_always_emit|psf_billboard_2d, "prt_mesh_rose_a",
50.0, 8.0, 0.02, 0.025, 1.0, 5.0,
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.25), (1.0, 0.25),
(4.0, 4.0, 0.1),
(0.0, 0.0, -0.9),
0.01,
10,
0.0
),
("sea_foam_a", psf_always_emit|psf_turn_to_velocity|psf_randomize_size, "prt_foam_a",
1.0, 3.0, 1.0, 0.0, 0.0, 0.0,
(0.7, 0.1), (1.0, 0.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 4.0), (1.0, 4.5),
(10.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
0.0,
0,
0.5
),
("fall_leafs_a", psf_always_emit|psf_billboard_2d, "prt_mesh_yrellow_leaf_a",
1.0, 9.0, 0.0, 0.025, 4.0, 4.0,
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.5), (1.0, 0.5),
(0.0, 0.25), (1.0, 0.25),
(4.0, 4.0, 4.0),
(0.0, 0.01, -0.9),
0.02,
15,
0.0
),
("desert_storm", psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_dust_1",
250.0, 2.0, 1.0, -0.15, 20.0, 40.0,
(0.2, 0.5), (1.0, 0.0),
(0.0, 1.0), (1.0, 1.0),
(0.0, 0.9), (1.0, 0.9),
(0.0, 0.78), (1.0, 0.78),
(0.0, 20.0), (1.0, 45.5),
(40.0, 40.0, 5.0),
(-20.0, 0.0, 3.0),
0.0,
130,
0.5
),
("blizzard", psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_dust_1",
250.0, 7.0, 1.0, 0.45, 20.0, 40.0,
(0.2, 0.5), (1.0, 0.5),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 30.0), (1.0, 45.5),
(35.0, 35.0, 8.0),
(-35.0, 0.0, -8.0),
0.0,
130,
0.5
),
("rain", psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_dust_1",
400.0, 5.0, 1.0, 0.65, 20.0, 40.0,
(1.0, 0.3), (1.0, 0.3),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(1.0, 1.0), (1.0, 1.0),
(0.0, 20.0), (1.0, 45.5),
(35.0, 35.0, 8.0),
(-20.0, 0.0, -10.0),
0.0,
130,
0.5
),
("oil", psf_always_emit|psf_global_emit_dir|psf_billboard_3d|psf_randomize_rotation|psf_randomize_size, "prt_mesh_smoke_1",
30.0, 4.0, 0.1, 1.0, 3.0, 5.0,
(0.2, 0.7), (1.0, 2.0),
(0.0, 0.2), (1.0, 0.0),
(0.0, 0.2), (1.0, 0.0),
(0.0, 0.2), (1.0, 0.0),
(0.0, 0.65), (1.0, 3.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 1.2),
0.0,
0,
0.0
),
#New particles from CWE
("ship_shrapnel", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
5000, 1, 2, 1.5, 10, 2,
(0.1, 1), (1, 1),
(0.1, 0.6), (1, 0.6),
(0.1, 0.5), (1, 0.5),
(0.1, 0.4), (1, 0.4),
(0, 1.5), (5, 1.5),
(2, 2, 5),
(0, 0, 0),
2.3,
200, 0
),
("lanse", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
2000, 3, 2, 0.7, 10, 2,
(0.1, 1), (1, 1),
(0.1, 0.6), (1, 0.6),
(0.1, 0.5), (1, 0.5),
(0.1, 0.4), (1, 0.4),
(0, 0.8), (1, 0.8),
(2.5, 2.5, 2.5),
(0, 0, 0),
2.3,
200, 0
),
("lanse_straw", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
2000, 3, 2, 0.7, 10, 2,
(0.1, 1), (1, 1),
(0.1, 0.6), (1, 0.6),
(0.1, 0.5), (1, 0.5),
(0.1, 0.4), (1, 0.4),
(0, 0.8), (1, 0.8),
(2.5, 2.5, 2.5),
(0, 0, 0),
2.3,
200, 0
),
("dummy_straw", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
500.0, 1.0, 2.0, 0.9, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.3), (1.0, 0.3),
(0.2, 0.2, 0.5),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("dummy_straw_big", psf_randomize_rotation|psf_randomize_size, "prt_mesh_straw_1",
500.0, 3.0, 2.0, 2.0, 10.0, 2.0,
(0.1, 1.0), (1.0, 1.0),
(0.1, 0.6), (1.0, 0.6),
(0.1, 0.5), (1.0, 0.5),
(0.1, 0.4), (1.0, 0.4),
(0.0, 0.8), (1.0, 0.8),
(3.0, 3.0, 3.0),
(0.0, 0.0, 0.0),
2.3,
200,
0.0
),
("lanse_blood", psf_billboard_3d|psf_billboard_drop|psf_randomize_rotation|psf_randomize_size, "prt_mesh_blood_3",
2000, 0.6, 3, 0.3, 0, 0,
(0, 0.25), (0.7, 0.1),
(0.1, 0.7), (1, 0.7),
(0.1, 0.7), (1, 0.7),
(0.1, 0.7), (1, 0.7),
(0.1, 0.4), (0.5, 0.35),
(1.2, 1.2, 1.2),
(0.4, 0.4, 0),
0.3,
150, 0
),
#End new particles
("blood_decapitation", psf_billboard_3d|psf_billboard_drop|psf_randomize_rotation|psf_randomize_size, "prt_blood_decapitation", #here
2000, 0.6, 3, 0.3, 0, 0,
(0, 0.25), (0.7, 0.1),
(0.1, 0.7), (1, 0.7),
(0.1, 0.7), (1, 0.7),
(0.1, 0.7), (1, 0.7),
(0, 0.15), (1, 0.35),
(0.01, 0.2, 0.01),
(0.2, 0.3, 0),
0.3,
150, 0
),
#End new particles
]
| 25.785222
| 145
| 0.504585
| 6,238
| 26,172
| 1.982206
| 0.046489
| 0.197655
| 0.154792
| 0.128751
| 0.779458
| 0.747675
| 0.719612
| 0.684432
| 0.650142
| 0.635423
| 0
| 0.25879
| 0.206709
| 26,172
| 1,015
| 146
| 25.785222
| 0.336769
| 0.11134
| 0
| 0.693878
| 0
| 0
| 0.087108
| 0.00776
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001134
| 0
| 0.001134
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72e38590b6794b0ddefd12326ef038fd6d9d1170
| 326
|
py
|
Python
|
owslib/owscontext/__init__.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | 218
|
2015-01-09T12:55:09.000Z
|
2022-03-29T12:22:54.000Z
|
owslib/owscontext/__init__.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | 512
|
2015-01-01T09:52:58.000Z
|
2022-03-30T11:57:07.000Z
|
owslib/owscontext/__init__.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | 218
|
2015-01-01T09:44:06.000Z
|
2022-03-31T14:09:13.000Z
|
# -*- coding: utf-8 -*-
# =============================================================================
# Authors : Alexander Kmoch <allixender@gmail.com>
#
# =============================================================================
"""
place for some constants to avoid circular imports
"""
from owslib.util import log
| 27.166667
| 79
| 0.352761
| 22
| 326
| 5.227273
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003401
| 0.09816
| 326
| 11
| 80
| 29.636364
| 0.387755
| 0.852761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be34605855aeb148c267eb97b3355f0266cc9873
| 1,466
|
py
|
Python
|
euler/0/08.py
|
PandaDrunkard/proex
|
c303f051721d9f271d8187957a4458dc5f4558b1
|
[
"MIT"
] | null | null | null |
euler/0/08.py
|
PandaDrunkard/proex
|
c303f051721d9f271d8187957a4458dc5f4558b1
|
[
"MIT"
] | null | null | null |
euler/0/08.py
|
PandaDrunkard/proex
|
c303f051721d9f271d8187957a4458dc5f4558b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def calculate(n, d):
    """Return the largest product of d adjacent digits of n.

    Returns -1 when n has fewer than d digits (no window exists).
    """
    digits = str(n)
    best = -1
    for start in range(len(digits) - d + 1):
        product = 1
        # Multiply the d digits of the current window.
        for ch in digits[start:start + d]:
            product *= int(ch)
        best = max(best, product)
    return best
# https://stackoverflow.com/questions/12385040/python-defining-an-integer-variable-over-multiple-lines
# Project Euler problem 8: the fixed 1000-digit input number, written as a
# multi-line string (newlines stripped before int() conversion) so each
# 50-digit row stays readable.
num = int("""
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450""".replace("\n",""))
# Largest product of 4 adjacent digits (example), then of 13 (the problem).
print(calculate(num, 4))
print(calculate(num, 13))
| 36.65
| 102
| 0.843111
| 90
| 1,466
| 13.733333
| 0.7
| 0.006472
| 0.012945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.767547
| 0.09618
| 1,466
| 39
| 103
| 37.589744
| 0.165283
| 0.097544
| 0
| 0
| 0
| 0
| 0.774242
| 0.757576
| 0
| 1
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0
| 0
| 0.060606
| 0.060606
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be4d92c5dba9162c586a5064616b4801aa28c4d9
| 7,772
|
py
|
Python
|
pygcdm/protogen/gcdm_server_pb2_grpc.py
|
rmcsqrd/pygcdm
|
0cef2d8220c3fdd9112510f6040a0c9c5e248a25
|
[
"BSD-3-Clause"
] | 2
|
2021-08-31T16:32:08.000Z
|
2021-09-03T22:13:27.000Z
|
pygcdm/protogen/gcdm_server_pb2_grpc.py
|
rmcsqrd/netcdf-grpc
|
0cef2d8220c3fdd9112510f6040a0c9c5e248a25
|
[
"BSD-3-Clause"
] | 4
|
2021-09-07T15:26:07.000Z
|
2021-09-13T21:06:15.000Z
|
pygcdm/protogen/gcdm_server_pb2_grpc.py
|
rmcsqrd/netcdf-grpc
|
0cef2d8220c3fdd9112510f6040a0c9c5e248a25
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from pygcdm.protogen import gcdm_grid_pb2 as pygcdm_dot_protogen_dot_gcdm__grid__pb2
from pygcdm.protogen import gcdm_netcdf_pb2 as pygcdm_dot_protogen_dot_gcdm__netcdf__pb2
class GcdmStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC on the ucar.gcdm.Gcdm service: header and
        # grid-dataset lookups are unary/unary, data fetches stream responses.
        self.GetNetcdfHeader = channel.unary_unary(
            '/ucar.gcdm.Gcdm/GetNetcdfHeader',
            request_serializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderRequest.SerializeToString,
            response_deserializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderResponse.FromString,
        )
        self.GetNetcdfData = channel.unary_stream(
            '/ucar.gcdm.Gcdm/GetNetcdfData',
            request_serializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataRequest.SerializeToString,
            response_deserializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataResponse.FromString,
        )
        self.GetGridDataset = channel.unary_unary(
            '/ucar.gcdm.Gcdm/GetGridDataset',
            request_serializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetRequest.SerializeToString,
            response_deserializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetResponse.FromString,
        )
        self.GetGridData = channel.unary_stream(
            '/ucar.gcdm.Gcdm/GetGridData',
            request_serializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataRequest.SerializeToString,
            response_deserializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataResponse.FromString,
        )
class GcdmServicer(object):
    """Missing associated documentation comment in .proto file."""

    # Base servicer: every RPC rejects with UNIMPLEMENTED until a subclass
    # overrides it with a real implementation.

    def GetNetcdfHeader(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetNetcdfData(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetGridDataset(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetGridData(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_GcdmServicer_to_server(servicer, server):
    # Map each RPC name to a handler wired to the generated protobuf message
    # classes, then register the whole set under the fully-qualified service name.
    rpc_method_handlers = {
        'GetNetcdfHeader': grpc.unary_unary_rpc_method_handler(
            servicer.GetNetcdfHeader,
            request_deserializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderRequest.FromString,
            response_serializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderResponse.SerializeToString,
        ),
        'GetNetcdfData': grpc.unary_stream_rpc_method_handler(
            servicer.GetNetcdfData,
            request_deserializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataRequest.FromString,
            response_serializer=pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataResponse.SerializeToString,
        ),
        'GetGridDataset': grpc.unary_unary_rpc_method_handler(
            servicer.GetGridDataset,
            request_deserializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetRequest.FromString,
            response_serializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetResponse.SerializeToString,
        ),
        'GetGridData': grpc.unary_stream_rpc_method_handler(
            servicer.GetGridData,
            request_deserializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataRequest.FromString,
            response_serializer=pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'ucar.gcdm.Gcdm', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Gcdm(object):
    """Missing associated documentation comment in .proto file."""

    # Channel-less convenience wrappers around grpc.experimental: each call
    # dials `target` directly with the generated (de)serializers.

    @staticmethod
    def GetNetcdfHeader(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ucar.gcdm.Gcdm/GetNetcdfHeader',
            pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderRequest.SerializeToString,
            pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.HeaderResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetNetcdfData(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/ucar.gcdm.Gcdm/GetNetcdfData',
            pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataRequest.SerializeToString,
            pygcdm_dot_protogen_dot_gcdm__netcdf__pb2.DataResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetGridDataset(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ucar.gcdm.Gcdm/GetGridDataset',
            pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetRequest.SerializeToString,
            pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDatasetResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetGridData(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/ucar.gcdm.Gcdm/GetGridData',
            pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataRequest.SerializeToString,
            pygcdm_dot_protogen_dot_gcdm__grid__pb2.GridDataResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 46.538922
| 118
| 0.687983
| 754
| 7,772
| 6.679045
| 0.14191
| 0.046465
| 0.087768
| 0.103257
| 0.821882
| 0.810763
| 0.787331
| 0.742057
| 0.707903
| 0.486696
| 0
| 0.004752
| 0.241894
| 7,772
| 166
| 119
| 46.819277
| 0.849966
| 0.080932
| 0
| 0.481203
| 1
| 0
| 0.06859
| 0.033093
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075188
| false
| 0
| 0.022556
| 0.030075
| 0.150376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be617153ffe2e46622204ad779f10f5b090b00e3
| 71
|
py
|
Python
|
string-1/make_tags.py
|
ibLeDy/codingbat-python
|
246df68940f1bb3b25bdc070906ad2ba42b0c447
|
[
"MIT"
] | null | null | null |
string-1/make_tags.py
|
ibLeDy/codingbat-python
|
246df68940f1bb3b25bdc070906ad2ba42b0c447
|
[
"MIT"
] | null | null | null |
string-1/make_tags.py
|
ibLeDy/codingbat-python
|
246df68940f1bb3b25bdc070906ad2ba42b0c447
|
[
"MIT"
] | null | null | null |
def make_tags(tag, word):
    """Return *word* wrapped in an HTML open/close pair, e.g. ('i', 'Yay') -> '<i>Yay</i>'."""
    opening = '<{}>'.format(tag)
    closing = '</{}>'.format(tag)
    return opening + word + closing
| 35.5
| 45
| 0.577465
| 10
| 71
| 4
| 0.7
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 2
| 45
| 35.5
| 0.634921
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
beb0dedc7af2a70ce107637542c88ce70d22f02d
| 85
|
py
|
Python
|
code/answer_2-1-10.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | 1
|
2022-03-29T13:50:12.000Z
|
2022-03-29T13:50:12.000Z
|
code/answer_2-1-10.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
code/answer_2-1-10.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
# Read the outer rectangle (H, W) and the strips to remove (h, w); the area
# left after cutting an h-row strip and a w-column strip is (H - h) * (W - w).
H, W = (int(token) for token in input().split())
h, w = (int(token) for token in input().split())
print((H - h) * (W - w))
| 21.25
| 32
| 0.529412
| 17
| 85
| 2.647059
| 0.411765
| 0.133333
| 0.222222
| 0.355556
| 0.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129412
| 85
| 3
| 33
| 28.333333
| 0.608108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe29d1fec3c88c28389083f62dc75c364804f676
| 82,139
|
py
|
Python
|
tests/core/featurizers/test_tracker_featurizer.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 9,701
|
2019-04-16T15:46:27.000Z
|
2022-03-31T11:52:18.000Z
|
tests/core/featurizers/test_tracker_featurizer.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 6,420
|
2019-04-16T15:58:22.000Z
|
2022-03-31T17:54:35.000Z
|
tests/core/featurizers/test_tracker_featurizer.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 3,063
|
2019-04-16T15:23:52.000Z
|
2022-03-31T00:01:12.000Z
|
from typing import Text, Dict, List, Optional
import numpy as np
import pytest
from rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer
from rasa.core.featurizers.single_state_featurizer import (
IntentTokenizerSingleStateFeaturizer,
)
from rasa.core.featurizers.tracker_featurizers import (
TrackerFeaturizer as TrackerFeaturizer,
)
from rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import IntentMaxHistoryTrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import FullDialogueTrackerFeaturizer
from rasa.shared.core.domain import Domain
from tests.core.utilities import user_uttered
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.constants import INTENT, ACTION_NAME
from rasa.shared.core.constants import (
ACTION_LISTEN_NAME,
ACTION_UNLIKELY_INTENT_NAME,
USER,
PREVIOUS_ACTION,
)
from rasa.shared.core.events import ActionExecuted
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.utils.tensorflow.constants import LABEL_PAD_ID
from rasa.core.exceptions import InvalidTrackerFeaturizerUsageError
def test_fail_to_load_non_existent_featurizer():
    """Loading a featurizer from a nonexistent path yields None rather than raising."""
    loaded = TrackerFeaturizer.load("non_existent_class")
    assert loaded is None
def test_persist_and_load_tracker_featurizer(tmp_path: Text, moodbot_domain: Domain):
    """A persisted featurizer round-trips through TrackerFeaturizer.load."""
    single_state = SingleStateFeaturizer()
    single_state.prepare_for_training(moodbot_domain)
    featurizer = MaxHistoryTrackerFeaturizer(single_state)
    featurizer.persist(tmp_path)
    restored = TrackerFeaturizer.load(tmp_path)
    assert restored is not None
    assert restored.state_featurizer is not None
def test_convert_action_labels_to_ids(domain: Domain):
    """_convert_labels_to_ids maps each action name to its index in the domain."""
    trackers_as_actions = [
        ["utter_greet", "utter_channel"],
        ["utter_greet", "utter_default", "utter_goodbye"],
    ]
    featurizer = TrackerFeaturizer()
    actual = featurizer._convert_labels_to_ids(trackers_as_actions, domain)
    # Build the expected ragged array from the same inputs.
    expected = np.array(
        [
            np.array([domain.action_names_or_texts.index(name) for name in names])
            for names in trackers_as_actions
        ]
    )
    assert expected.size == actual.size
    for expected_row, actual_row in zip(expected, actual):
        assert np.all(expected_row == actual_row)
def test_convert_intent_labels_to_ids(domain: Domain):
    """Intent labels become domain indices; shorter rows are padded with LABEL_PAD_ID."""
    trackers_as_intents = [
        ["next_intent", "nlu_fallback", "out_of_scope", "restart"],
        ["greet", "hello", "affirm"],
    ]
    featurizer = IntentMaxHistoryTrackerFeaturizer()
    actual = featurizer._convert_labels_to_ids(trackers_as_intents, domain)
    first_row = [domain.intents.index(name) for name in trackers_as_intents[0]]
    # The second tracker has one intent fewer, so it is right-padded.
    second_row = [domain.intents.index(name) for name in trackers_as_intents[1]]
    second_row.append(LABEL_PAD_ID)
    expected = np.array([first_row, second_row])
    assert expected.size == actual.size
    assert expected.shape == actual.shape
    assert np.all(expected == actual)
def test_featurize_trackers_raises_on_missing_state_featurizer(domain: Domain):
    """Featurizing without a state featurizer is a usage error."""
    featurizer = TrackerFeaturizer()
    with pytest.raises(InvalidTrackerFeaturizerUsageError):
        featurizer.featurize_trackers([], domain, precomputations=None)
def compare_featurized_states(
    states1: List[Dict[Text, List[Features]]], states2: List[Dict[Text, List[Features]]]
) -> bool:
    """Compares two lists of featurized states and returns True if they
    are identical and False otherwise.
    """
    if len(states1) != len(states2):
        return False
    for left, right in zip(states1, states2):
        if left.keys() != right.keys():
            return False
        for key in left.keys():
            for feat_a, feat_b in zip(left[key], right[key]):
                # Dense comparison of the sparse feature matrices.
                if np.any((feat_a.features != feat_b.features).toarray()):
                    return False
                # All three metadata fields must match as well.
                if (feat_a.origin, feat_a.attribute, feat_a.type) != (
                    feat_b.origin,
                    feat_b.attribute,
                    feat_b.type,
                ):
                    return False
    return True
def test_featurize_trackers_with_full_dialogue_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """The full-dialogue featurizer yields one featurized state per tracker turn."""
    featurizer = FullDialogueTrackerFeaturizer(SingleStateFeaturizer())
    features, labels, entity_tags = featurizer.featurize_trackers(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_unhappy"]]},
            {ACTION_NAME: [actions["utter_cheer_up"]]},
            {ACTION_NAME: [actions["utter_did_that_help"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["deny"]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]])
    assert labels is not None
    assert len(labels) == 1
    for actual, expected in zip(labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any(any(turn_tags) for turn_tags in entity_tags)
def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featurizer(
    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """With ignore_action_unlikely_intent=True, those turns are dropped entirely."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_cheer_up"),
            ActionExecuted("utter_did_that_help"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("deny"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_goodbye"),
        ],
        domain=moodbot_domain,
    )
    featurizer = FullDialogueTrackerFeaturizer(SingleStateFeaturizer())
    features, labels, entity_tags = featurizer.featurize_trackers(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    # Identical to the plain moodbot story: no action_unlikely_intent states.
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_unhappy"]]},
            {ACTION_NAME: [actions["utter_cheer_up"]]},
            {ACTION_NAME: [actions["utter_did_that_help"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["deny"]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]])
    assert labels is not None
    assert len(labels) == 1
    for actual, expected in zip(labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any(any(turn_tags) for turn_tags in entity_tags)
def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featurizer(
    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """By default, action_unlikely_intent turns stay in the featurized dialogue."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_cheer_up"),
            ActionExecuted("utter_did_that_help"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("deny"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_goodbye"),
        ],
        domain=moodbot_domain,
    )
    featurizer = FullDialogueTrackerFeaturizer(SingleStateFeaturizer())
    features, labels, entity_tags = featurizer.featurize_trackers(
        [tracker], moodbot_domain, precomputations=None,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions[ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_unhappy"]]},
            {ACTION_NAME: [actions[ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [actions["utter_cheer_up"]]},
            {ACTION_NAME: [actions["utter_did_that_help"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["deny"]]},
            {ACTION_NAME: [actions[ACTION_UNLIKELY_INTENT_NAME]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 9, 16, 0, 9, 13, 14, 0, 9, 15]])
    assert labels is not None
    assert len(labels) == 1
    for actual, expected in zip(labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any(any(turn_tags) for turn_tags in entity_tags)
def test_create_state_features_full_dialogue_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """create_state_features featurizes every turn, including the final bot action."""
    state_featurizer = SingleStateFeaturizer()
    featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    state_featurizer.prepare_for_training(moodbot_domain)
    features = featurizer.create_state_features(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_unhappy"]]},
            {ACTION_NAME: [actions["utter_cheer_up"]]},
            {ACTION_NAME: [actions["utter_did_that_help"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["deny"]]},
            {ACTION_NAME: [actions["utter_goodbye"]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
def test_state_features_ignore_action_unlikely_intent_full_dialogue_tracker_featurizer(
    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """create_state_features drops action_unlikely_intent turns when asked to."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    state_featurizer.prepare_for_training(moodbot_domain)
    features = featurizer.create_state_features(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_great"]]},
            {ACTION_NAME: [actions["utter_happy"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["goodbye"]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
def test_state_features_keep_action_unlikely_intent_full_dialogue_tracker_featurizer(
    moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
):
    """Without the ignore flag, action_unlikely_intent turns are featurized too."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    state_featurizer.prepare_for_training(moodbot_domain)
    features = featurizer.create_state_features(
        [tracker], moodbot_domain, precomputations=None,
    )
    actions = moodbot_features["actions"]
    intents = moodbot_features["intents"]
    expected_features = [
        [
            {},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["greet"]]},
            {ACTION_NAME: [actions[ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [actions["utter_greet"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["mood_great"]]},
            {ACTION_NAME: [actions[ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [actions["utter_happy"]]},
            {ACTION_NAME: [actions[ACTION_LISTEN_NAME]], INTENT: [intents["goodbye"]]},
        ]
    ]
    assert features is not None
    assert len(features) == len(expected_features)
    for actual, expected in zip(features, expected_features):
        assert compare_featurized_states(actual, expected)
def test_prediction_states_with_full_dialogue_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker, moodbot_domain: Domain
):
    """prediction_states returns the raw (un-featurized) state dicts per turn."""
    featurizer = FullDialogueTrackerFeaturizer(SingleStateFeaturizer())
    states = featurizer.prediction_states(
        [moodbot_tracker], moodbot_domain,
    )
    expected_states = [
        [
            {},
            {PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "greet"}},
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}},
            {PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "mood_unhappy"}},
            {USER: {INTENT: "mood_unhappy"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_cheer_up"}},
            {USER: {INTENT: "mood_unhappy"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_did_that_help"}},
            {PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME}, USER: {INTENT: "deny"}},
            {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"}},
        ]
    ]
    assert states is not None
    assert len(states) == len(expected_states)
    for actual, expected in zip(states, expected_states):
        assert actual == expected
def test_prediction_states_hide_rule_states_with_full_dialogue_tracker_featurizer(
    moodbot_domain: Domain,
):
    """With `ignore_rule_only_turns=True`, turns flagged with
    `hide_rule_turn=True` must be excluded from the prediction states."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )
    # Only the initial empty state and the user turn remain; both hidden rule
    # turns are dropped.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
        ],
    ]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
    # Same check for a tracker where the hidden rule turns are embedded in the
    # middle of an otherwise normal conversation.
    embedded_rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
            user_uttered("mood_great"),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
            },
        ]
    ]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
def test_prediction_states_ignore_action_intent_unlikely_full_dialogue_featurizer(
    moodbot_domain: Domain,
):
    """With `ignore_action_unlikely_intent=True`, no state with
    `action_unlikely_intent` as the previous action may appear."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [tracker], moodbot_domain, ignore_action_unlikely_intent=True
    )
    # Both ACTION_UNLIKELY_INTENT_NAME events are absent from the states.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "goodbye"},
            },
        ]
    ]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
def test_prediction_states_keeps_action_intent_unlikely_full_dialogue_featurizer(
    moodbot_domain: Domain,
):
    """By default (no ignore flag), `action_unlikely_intent` turns are kept in
    the prediction states."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,)
    # Unlike the "ignore" variant above, both ACTION_UNLIKELY_INTENT_NAME
    # turns appear as states.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {
                USER: {INTENT: "greet"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "goodbye"},
            },
        ]
    ]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 2])
def test_featurize_trackers_with_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`MaxHistoryTrackerFeaturizer.featurize_trackers` produces one training
    example per bot turn: the state history up to that turn (truncated to the
    last `max_history` states when set) plus the action label."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )
    # Each inner list is one example's state history, growing by one state per
    # example as the conversation proceeds.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["deny"]],
            },
        ],
    ]
    if max_history is not None:
        # Truncation keeps only the most recent `max_history` states.
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    # One action-label index per training example (column vector).
    expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]]).T
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    assert np.all(actual_labels == expected_labels)
    # moodbot doesn't contain e2e entities
    assert not any([any(turn_tags) for turn_tags in entity_tags])
@pytest.mark.parametrize("max_history", [None, 2])
def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`featurize_trackers` with `ignore_action_unlikely_intent=True` must drop
    the `action_unlikely_intent` turn from both features and labels.

    The tracker contains one `ACTION_UNLIKELY_INTENT_NAME` event between the
    greet intent and `utter_greet`; it must not show up in the featurized
    states or in the label sequence.
    """
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history,
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )
    # No state for the action_unlikely_intent turn appears below.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 16, 0]]).T
    # Guard against a `None` return before inspecting the shape — consistent
    # with the sibling featurize_trackers tests in this file.
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    assert np.all(actual_labels == expected_labels)
    # moodbot doesn't contain e2e entities
    assert not any(any(turn_tags) for turn_tags in entity_tags)
@pytest.mark.parametrize("max_history", [None, 2])
def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """By default, `featurize_trackers` keeps the `action_unlikely_intent`
    turn in both the featurized states and the label sequence."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history,
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [tracker], moodbot_domain, precomputations=None,
    )
    # Unlike the "ignore" variant above, the ACTION_UNLIKELY_INTENT_NAME turn
    # is present in the state histories and contributes a label.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 9, 16, 0]]).T
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    for actual, expected in zip(actual_labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any([any(turn_tags) for turn_tags in entity_tags])
@pytest.mark.parametrize(
    "remove_duplicates,max_history",
    [[True, None], [True, 2], [False, None], [False, 2],],
)
def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    remove_duplicates: bool,
    max_history: Optional[int],
):
    """Featurizing the same tracker twice yields each example once when
    `remove_duplicates=True`, and twice when it is False."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history, remove_duplicates=remove_duplicates
    )
    # Add Duplicate moodbot_tracker states should get removed.
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None,
    )
    # Expected examples for a single pass over the moodbot conversation.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["deny"]],
            },
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    if not remove_duplicates:
        # Without deduplication the second tracker repeats every example.
        expected_features = expected_features * 2
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]]).T
    if not remove_duplicates:
        expected_labels = np.vstack([expected_labels] * 2)
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    assert np.all(actual_labels == expected_labels)
    # moodbot doesn't contain e2e entities
    assert not any([any(turn_tags) for turn_tags in entity_tags])
@pytest.mark.parametrize("max_history", [None, 2])
def test_create_state_features_with_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`create_state_features` featurizes the whole moodbot conversation,
    truncated to the most recent `max_history` states when a limit is set."""
    single_state_featurizer = SingleStateFeaturizer()
    featurizer = MaxHistoryTrackerFeaturizer(
        single_state_featurizer, max_history=max_history
    )
    single_state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = featurizer.create_state_features(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )
    # Shared handles to the fixture's feature lookup tables.
    action_feats = moodbot_features["actions"]
    intent_feats = moodbot_features["intents"]
    listen_turn = {ACTION_NAME: [action_feats[ACTION_LISTEN_NAME]]}
    expected_features = [
        [
            {},
            {**listen_turn, INTENT: [intent_feats["greet"]]},
            {ACTION_NAME: [action_feats["utter_greet"]]},
            {**listen_turn, INTENT: [intent_feats["mood_unhappy"]]},
            {ACTION_NAME: [action_feats["utter_cheer_up"]]},
            {ACTION_NAME: [action_feats["utter_did_that_help"]]},
            {**listen_turn, INTENT: [intent_feats["deny"]]},
            {ACTION_NAME: [action_feats["utter_goodbye"]]},
        ]
    ]
    if max_history is not None:
        expected_features = [states[-max_history:] for states in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for got, want in zip(actual_features, expected_features):
        assert compare_featurized_states(got, want)
@pytest.mark.parametrize("max_history", [None, 2])
def test_create_state_features_ignore_action_unlikely_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`create_state_features` with `ignore_action_unlikely_intent=True` must
    not featurize the `action_unlikely_intent` turns."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = tracker_featurizer.create_state_features(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )
    # Neither ACTION_UNLIKELY_INTENT_NAME event is featurized below.
    expected_features = [
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_great"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["goodbye"]],
            },
        ]
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
@pytest.mark.parametrize("max_history", [None, 2])
def test_create_state_features_keep_action_unlikely_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """By default, `create_state_features` featurizes the
    `action_unlikely_intent` turns as ordinary action states."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = tracker_featurizer.create_state_features(
        [tracker], moodbot_domain, precomputations=None,
    )
    # Both ACTION_UNLIKELY_INTENT_NAME events appear as featurized states.
    expected_features = [
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_great"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["goodbye"]],
            },
        ]
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
@pytest.mark.parametrize("max_history", [None, 2])
def test_prediction_states_with_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """The max-history featurizer yields the full moodbot state sequence,
    keeping only the last `max_history` states when a limit is set."""
    featurizer = MaxHistoryTrackerFeaturizer(
        SingleStateFeaturizer(), max_history=max_history
    )
    actual_states = featurizer.prediction_states(
        [moodbot_tracker], moodbot_domain,
    )
    # The previous-action dict for a listen turn is identical everywhere.
    listen = {ACTION_NAME: ACTION_LISTEN_NAME}
    expected_states = [
        [
            {},
            {PREVIOUS_ACTION: listen, USER: {INTENT: "greet"}},
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"}},
            {PREVIOUS_ACTION: listen, USER: {INTENT: "mood_unhappy"}},
            {
                USER: {INTENT: "mood_unhappy"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_cheer_up"},
            },
            {
                USER: {INTENT: "mood_unhappy"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_did_that_help"},
            },
            {PREVIOUS_ACTION: listen, USER: {INTENT: "deny"}},
            {USER: {INTENT: "deny"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_goodbye"}},
        ]
    ]
    if max_history is not None:
        expected_states = [states[-max_history:] for states in expected_states]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for got, want in zip(actual_states, expected_states):
        assert got == want
@pytest.mark.parametrize("max_history", [None, 2])
def test_prediction_states_hide_rule_states_with_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """With `ignore_rule_only_turns=True`, turns flagged `hide_rule_turn` are
    excluded from the max-history prediction states."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )
    # Only the initial state and the user turn survive; hidden rule turns are
    # dropped.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
        ],
    ]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
    # Same check for a tracker where the hidden rule turns are embedded in the
    # middle of a normal conversation.
    embedded_rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
            user_uttered("mood_great"),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
        ]
    ]
    if max_history is not None:
        expected_states = [x[-max_history:] for x in expected_states]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 3])
def test_prediction_states_ignores_action_intent_unlikely_max_history_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """With `ignore_action_unlikely_intent=True`, the max-history prediction
    states contain no `action_unlikely_intent` turns."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [tracker], moodbot_domain, ignore_action_unlikely_intent=True
    )
    # Both ACTION_UNLIKELY_INTENT_NAME events are absent from the states.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "goodbye"},
            },
        ]
    ]
    if max_history is not None:
        expected_states = [x[-max_history:] for x in expected_states]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 3])
def test_prediction_states_keeps_action_intent_unlikely_max_history_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """By default, the max-history prediction states keep the
    `action_unlikely_intent` turns."""
    state_featurizer = SingleStateFeaturizer()
    tracker_featurizer = MaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain,)
    # Both ACTION_UNLIKELY_INTENT_NAME events appear as states.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {
                USER: {INTENT: "greet"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "goodbye"},
            },
        ]
    ]
    if max_history is not None:
        expected_states = [x[-max_history:] for x in expected_states]
    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize(
    "max_history,moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_featurize_trackers_with_intent_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`IntentMaxHistoryTrackerFeaturizer.featurize_trackers` produces one
    example per user turn, labelled with intent indices rather than actions.

    The `moodbot_features` fixture is parametrized indirectly so it is built
    with the `IntentTokenizerSingleStateFeaturizer`.
    """
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )
    # One state-history per user turn in the moodbot conversation.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    # One intent-label index per example (column vector).
    expected_labels = np.array([[5, 7, 3]]).T
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    assert np.all(actual_labels == expected_labels)
    # moodbot doesn't contain e2e entities
    assert not any([any(turn_tags) for turn_tags in entity_tags])
@pytest.mark.parametrize(
    "max_history, moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_trackers_ignore_action_unlikely_intent_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`IntentMaxHistoryTrackerFeaturizer.featurize_trackers` with
    `ignore_action_unlikely_intent=True` must drop the
    `action_unlikely_intent` turn from the featurized state histories.
    """
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history,
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )
    # No state for the action_unlikely_intent turn appears below.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[5, 7]]).T
    # Guard against a `None` return before inspecting the shape — consistent
    # with the sibling keep_action_unlikely_intent test below.
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    for actual, expected in zip(actual_labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any(any(turn_tags) for turn_tags in entity_tags)
@pytest.mark.parametrize(
    "max_history,moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_trackers_keep_action_unlikely_intent_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """By default, `IntentMaxHistoryTrackerFeaturizer.featurize_trackers`
    keeps the `action_unlikely_intent` turn in the state histories."""
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_unhappy"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history,
    )
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [tracker], moodbot_domain, precomputations=None,
    )
    # The ACTION_UNLIKELY_INTENT_NAME turn appears in the second example.
    expected_features = [
        [{},],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
    ]
    if max_history is not None:
        expected_features = [x[-max_history:] for x in expected_features]
    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
    expected_labels = np.array([[5, 7]]).T
    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    for actual, expected in zip(actual_labels, expected_labels):
        assert np.all(actual == expected)
    # moodbot doesn't contain e2e entities
    assert not any([any(turn_tags) for turn_tags in entity_tags])
@pytest.mark.parametrize(
    "remove_duplicates,max_history,moodbot_features",
    [
        [True, None, "IntentTokenizerSingleStateFeaturizer"],
        [True, 2, "IntentTokenizerSingleStateFeaturizer"],
        [False, None, "IntentTokenizerSingleStateFeaturizer"],
        [False, 2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_deduplicate_featurize_trackers_with_intent_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    remove_duplicates: bool,
    max_history: Optional[int],
):
    """`remove_duplicates` controls whether identical trackers are deduplicated.

    Featurizes the same tracker twice: with `remove_duplicates=True` the
    duplicate examples/labels must be collapsed, otherwise they appear twice.
    """
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history, remove_duplicates=remove_duplicates
    )

    # Add Duplicate moodbot_tracker states should get removed.
    actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
        [moodbot_tracker, moodbot_tracker], moodbot_domain, precomputations=None,
    )

    expected_features = [
        [{}],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
        ],
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
        ],
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per example.
        expected_features = [x[-max_history:] for x in expected_features]
    if not remove_duplicates:
        # Both (identical) trackers contribute their examples.
        expected_features = expected_features * 2

    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)

    expected_labels = np.array([[5, 7, 3]]).T
    if not remove_duplicates:
        expected_labels = np.vstack([expected_labels] * 2)

    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape
    assert np.all(actual_labels == expected_labels)

    # moodbot doesn't contain e2e entities; use a generator expression
    # instead of materializing an intermediate list (C419).
    assert not any(any(turn_tags) for turn_tags in entity_tags)
@pytest.mark.parametrize(
    "max_history,moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_create_state_features_with_intent_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`create_state_features` produces the expected per-state feature dicts.

    The last turn is trimmed from the tracker so it ends right after a user
    utterance, then the (optionally `max_history`-truncated) state features
    are compared against hand-built expectations.
    """
    # IntentMaxHistoryTrackerFeaturizer prediction is only done after
    # a UserUttered event so remove the last BotUttered and
    # ActionExecuted events.
    moodbot_tracker = moodbot_tracker.copy()
    moodbot_tracker.events.pop()
    moodbot_tracker.events.pop()

    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    # Featurizer must be prepared before it can encode states.
    state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = tracker_featurizer.create_state_features(
        [moodbot_tracker], moodbot_domain, precomputations=None,
    )

    expected_features = [
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_unhappy"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
        ],
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per example.
        expected_features = [x[-max_history:] for x in expected_features]

    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
@pytest.mark.parametrize(
    "max_history,moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_state_features_ignore_action_unlikely_intent_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """`ignore_action_unlikely_intent=True` drops unlikely-intent states.

    The tracker contains two `ACTION_UNLIKELY_INTENT` events; neither may
    appear in the state features produced by `create_state_features`.
    """
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    # Featurizer must be prepared before it can encode states.
    state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = tracker_featurizer.create_state_features(
        [tracker],
        moodbot_domain,
        precomputations=None,
        ignore_action_unlikely_intent=True,
    )

    # Note: no ACTION_UNLIKELY_INTENT states in the expectation.
    expected_features = [
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_great"]],
            },
            {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]},
        ],
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per example.
        expected_features = [x[-max_history:] for x in expected_features]

    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
@pytest.mark.parametrize(
    "max_history,moodbot_features",
    [
        [None, "IntentTokenizerSingleStateFeaturizer"],
        [2, "IntentTokenizerSingleStateFeaturizer"],
    ],
    indirect=["moodbot_features"],
)
def test_state_features_keep_action_unlikely_intent_intent_max_history_featurizer(
    moodbot_domain: Domain,
    moodbot_features: Dict[Text, Dict[Text, Features]],
    max_history: Optional[int],
):
    """Without the ignore flag, unlikely-intent states are kept.

    Counterpart to the "ignore" test above: `create_state_features` is called
    without `ignore_action_unlikely_intent`, so both `ACTION_UNLIKELY_INTENT`
    turns must appear in the output.
    """
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    # Featurizer must be prepared before it can encode states.
    state_featurizer.prepare_for_training(moodbot_domain)
    actual_features = tracker_featurizer.create_state_features(
        [tracker], moodbot_domain, precomputations=None,
    )

    expected_features = [
        [
            {},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["greet"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
            {
                ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
                INTENT: [moodbot_features["intents"]["mood_great"]],
            },
            {ACTION_NAME: [moodbot_features["actions"][ACTION_UNLIKELY_INTENT_NAME]]},
            {ACTION_NAME: [moodbot_features["actions"]["utter_happy"]]},
        ],
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per example.
        expected_features = [x[-max_history:] for x in expected_features]

    assert actual_features is not None
    assert len(actual_features) == len(expected_features)
    for actual, expected in zip(actual_features, expected_features):
        assert compare_featurized_states(actual, expected)
@pytest.mark.parametrize("max_history", [None, 2])
def test_prediction_states_with_intent_max_history_tracker_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """`prediction_states` yields the expected raw state dicts.

    Unlike the feature tests, this checks the intermediate state
    representation (PREVIOUS_ACTION / USER dicts) rather than numeric
    features.
    """
    # IntentMaxHistoryTrackerFeaturizer prediction is only done after
    # a UserUttered event so remove the last BotUttered and
    # ActionExecuted events.
    moodbot_tracker = moodbot_tracker.copy()
    moodbot_tracker.events.pop()
    moodbot_tracker.events.pop()

    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    actual_states = tracker_featurizer.prediction_states(
        [moodbot_tracker], moodbot_domain,
    )

    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_unhappy"},
            },
            {
                USER: {INTENT: "mood_unhappy"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_cheer_up"},
            },
            {
                USER: {INTENT: "mood_unhappy"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_did_that_help"},
            },
        ]
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per tracker.
        expected_states = [x[-max_history:] for x in expected_states]

    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 2])
def test_prediction_states_hide_rule_states_intent_max_history_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """`ignore_rule_only_turns=True` hides turns marked as rule turns.

    Both trackers below consist (almost) entirely of `hide_rule_turn=True`
    events, so prediction states collapse to just the initial empty state.
    """
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )

    # Case 1: tracker ending in hidden rule turns.
    rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )

    expected_states = [[{}]]

    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected

    # Case 2: same rule turns but followed by a further user utterance.
    embedded_rule_tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted("utter_greet", hide_rule_turn=True),
            ActionExecuted(ACTION_LISTEN_NAME, hide_rule_turn=True),
            user_uttered("mood_great"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [embedded_rule_tracker], moodbot_domain, ignore_rule_only_turns=True,
    )

    expected_states = [[{},]]

    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 3])
def test_prediction_states_ignores_action_intent_unlikely_intent_max_history_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """`ignore_action_unlikely_intent=True` removes unlikely-intent states
    from `prediction_states` output."""
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states(
        [tracker], moodbot_domain, ignore_action_unlikely_intent=True
    )

    # Note: no ACTION_UNLIKELY_INTENT states in the expectation.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
        ]
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per tracker.
        expected_states = [x[-max_history:] for x in expected_states]

    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize("max_history", [None, 3])
def test_prediction_states_keeps_action_intent_unlikely_intent_max_history_featurizer(
    moodbot_tracker: DialogueStateTracker,
    moodbot_domain: Domain,
    max_history: Optional[int],
):
    """Without the ignore flag, unlikely-intent states stay in
    `prediction_states` output (counterpart to the "ignores" test above)."""
    state_featurizer = IntentTokenizerSingleStateFeaturizer()
    tracker_featurizer = IntentMaxHistoryTrackerFeaturizer(
        state_featurizer, max_history=max_history
    )
    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("greet"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("mood_great"),
            ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
            ActionExecuted("utter_happy"),
            ActionExecuted(ACTION_LISTEN_NAME),
            user_uttered("goodbye"),
        ],
        domain=moodbot_domain,
    )
    actual_states = tracker_featurizer.prediction_states([tracker], moodbot_domain)

    # ACTION_UNLIKELY_INTENT states are expected to be present here.
    expected_states = [
        [
            {},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "greet"},
            },
            {
                USER: {INTENT: "greet"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {USER: {INTENT: "greet"}, PREVIOUS_ACTION: {ACTION_NAME: "utter_greet"},},
            {
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_LISTEN_NAME},
                USER: {INTENT: "mood_great"},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: ACTION_UNLIKELY_INTENT_NAME},
            },
            {
                USER: {INTENT: "mood_great"},
                PREVIOUS_ACTION: {ACTION_NAME: "utter_happy"},
            },
        ]
    ]
    if max_history is not None:
        # Only the most recent `max_history` states are kept per tracker.
        expected_states = [x[-max_history:] for x in expected_states]

    assert actual_states is not None
    assert len(actual_states) == len(expected_states)
    for actual, expected in zip(actual_states, expected_states):
        assert actual == expected
@pytest.mark.parametrize(
    "remove_duplicates, max_history",
    [[True, None], [True, 2], [False, None], [False, 2]],
)
def test_multilabels_with_intent_max_history_tracker_featurizer(
    moodbot_domain: Domain, max_history: Optional[int], remove_duplicates: bool
):
    """Two trackers sharing a prefix yield multi-label rows.

    Both trackers start with the same four events and diverge only in the
    final user intent, so the shared prefix maps to two candidate labels;
    padding uses -1.  With `remove_duplicates=False` every example appears
    once per tracker.
    """
    featurizer = IntentMaxHistoryTrackerFeaturizer(
        IntentTokenizerSingleStateFeaturizer(),
        max_history=max_history,
        remove_duplicates=remove_duplicates,
    )

    def make_tracker(final_intent):
        # Shared conversation prefix plus the diverging final user turn.
        return DialogueStateTracker.from_events(
            "default",
            [
                ActionExecuted(ACTION_LISTEN_NAME),
                user_uttered("greet"),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_LISTEN_NAME),
                user_uttered(final_intent),
            ],
            domain=moodbot_domain,
        )

    trackers = [make_tracker("mood_great"), make_tracker("mood_unhappy")]

    _, actual_labels, _ = featurizer.featurize_trackers(
        trackers, moodbot_domain, precomputations=None,
    )

    greet_index = 5
    mood_great_index = 6
    mood_unhappy_index = 7
    base_rows = [
        [greet_index, -1],
        [mood_great_index, mood_unhappy_index],
    ]
    # Without deduplication each tracker contributes its own copy of the rows.
    expected_labels = np.array(base_rows if remove_duplicates else base_rows * 2)

    assert actual_labels is not None
    assert actual_labels.shape == expected_labels.shape

    # Order of label indices may be different,
    # hence need to sort the indices and then check.
    for actual_row, expected_row in zip(actual_labels, expected_labels):
        assert sorted(actual_row) == sorted(expected_row)
| 35.162243
| 88
| 0.630017
| 7,801
| 82,139
| 6.265351
| 0.028971
| 0.075497
| 0.054669
| 0.073656
| 0.937679
| 0.932114
| 0.92929
| 0.918222
| 0.91327
| 0.910938
| 0
| 0.002659
| 0.2674
| 82,139
| 2,335
| 89
| 35.177302
| 0.809572
| 0.012065
| 0
| 0.690819
| 0
| 0
| 0.075869
| 0.009752
| 0
| 0
| 0
| 0
| 0.078412
| 1
| 0.019355
| false
| 0
| 0.008933
| 0
| 0.031762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe39c3d4413962982ebda1a16a90246a5fdf2ec2
| 12,136
|
py
|
Python
|
pandas/tests/plotting/frame/test_hist_box_by.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T01:38:03.000Z
|
2022-03-29T01:38:03.000Z
|
pandas/tests/plotting/frame/test_hist_box_by.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T02:15:07.000Z
|
2022-03-08T02:15:07.000Z
|
pandas/tests/plotting/frame/test_hist_box_by.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-22T11:50:25.000Z
|
2022-03-22T11:50:25.000Z
|
import re
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
@pytest.fixture
def hist_df():
    """Deterministic frame: two normal columns A/B plus categorical C/D.

    Seeded so every test sees identical data; C and D each hold 30 draws
    from {"a", "b", "c"}.
    """
    np.random.seed(0)
    frame = DataFrame(np.random.randn(30, 2), columns=["A", "B"])
    # Draw C before D to keep the RNG stream identical to the original.
    for categorical in ("C", "D"):
        frame[categorical] = np.random.choice(["a", "b", "c"], 30)
    return frame
@td.skip_if_no_mpl
class TestHistWithBy(TestPlotBase):
    """Tests for ``DataFrame.plot.hist`` with the ``by`` argument (GH 15079)."""

    @pytest.mark.slow
    @pytest.mark.parametrize(
        "by, column, titles, legends",
        [
            ("C", "A", ["a", "b", "c"], [["A"]] * 3),
            ("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3),
            ("C", None, ["a", "b", "c"], [["A", "B"]] * 3),
            (
                ["C", "D"],
                "A",
                [
                    "(a, a)",
                    "(a, b)",
                    "(a, c)",
                    "(b, a)",
                    "(b, b)",
                    "(b, c)",
                    "(c, a)",
                    "(c, b)",
                    "(c, c)",
                ],
                [["A"]] * 9,
            ),
            (
                ["C", "D"],
                ["A", "B"],
                [
                    "(a, a)",
                    "(a, b)",
                    "(a, c)",
                    "(b, a)",
                    "(b, b)",
                    "(b, c)",
                    "(c, a)",
                    "(c, b)",
                    "(c, c)",
                ],
                [["A", "B"]] * 9,
            ),
            (
                ["C", "D"],
                None,
                [
                    "(a, a)",
                    "(a, b)",
                    "(a, c)",
                    "(b, a)",
                    "(b, b)",
                    "(b, c)",
                    "(c, a)",
                    "(c, b)",
                    "(c, c)",
                ],
                [["A", "B"]] * 9,
            ),
        ],
    )
    def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df):
        """Grouping by one or two columns sets subplot titles and legends."""
        # GH 15079
        axes = _check_plot_works(hist_df.plot.hist, column=column, by=by)
        result_titles = [ax.get_title() for ax in axes]
        result_legends = [
            [legend.get_text() for legend in ax.get_legend().texts] for ax in axes
        ]

        assert result_legends == legends
        assert result_titles == titles

    @pytest.mark.parametrize(
        "by, column, titles, legends",
        [
            (0, "A", ["a", "b", "c"], [["A"]] * 3),
            (0, None, ["a", "b", "c"], [["A", "B"]] * 3),
            (
                [0, "D"],
                "A",
                [
                    "(a, a)",
                    "(a, b)",
                    "(a, c)",
                    "(b, a)",
                    "(b, b)",
                    "(b, c)",
                    "(c, a)",
                    "(c, b)",
                    "(c, c)",
                ],
                [["A"]] * 9,
            ),
        ],
    )
    def test_hist_plot_by_0(self, by, column, titles, legends, hist_df):
        """``by`` also accepts the integer column label 0."""
        # GH 15079
        df = hist_df.copy()
        df = df.rename(columns={"C": 0})
        axes = _check_plot_works(df.plot.hist, column=column, by=by)
        result_titles = [ax.get_title() for ax in axes]
        result_legends = [
            [legend.get_text() for legend in ax.get_legend().texts] for ax in axes
        ]

        assert result_legends == legends
        assert result_titles == titles

    @pytest.mark.parametrize(
        "by, column",
        [
            ([], ["A"]),
            ([], ["A", "B"]),
            ((), None),
            ((), ["A", "B"]),
        ],
    )
    def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df):
        """An empty ``by`` (list or tuple) must raise ValueError."""
        # GH 15079
        msg = "No group keys passed"
        with pytest.raises(ValueError, match=msg):
            _check_plot_works(hist_df.plot.hist, column=column, by=by)

    @pytest.mark.slow
    @pytest.mark.parametrize(
        "by, column, layout, axes_num",
        [
            (["C"], "A", (2, 2), 3),
            ("C", "A", (2, 2), 3),
            (["C"], ["A"], (1, 3), 3),
            ("C", None, (3, 1), 3),
            ("C", ["A", "B"], (3, 1), 3),
            (["C", "D"], "A", (9, 1), 9),
            (["C", "D"], "A", (3, 3), 9),
            (["C", "D"], ["A"], (5, 2), 9),
            (["C", "D"], ["A", "B"], (9, 1), 9),
            (["C", "D"], None, (9, 1), 9),
            (["C", "D"], ["A", "B"], (5, 2), 9),
        ],
    )
    def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
        """Explicit ``layout`` shapes the grid of grouped histograms."""
        # GH 15079
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(
                hist_df.plot.hist, column=column, by=by, layout=layout
            )
        self._check_axes_shape(axes, axes_num=axes_num, layout=layout)

    @pytest.mark.parametrize(
        "msg, by, layout",
        [
            ("larger than required size", ["C", "D"], (1, 1)),
            (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
            ("At least one dimension of layout must be positive", "C", (-1, -1)),
        ],
    )
    def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
        """Invalid ``layout`` values raise ValueError with a clear message."""
        # GH 15079, test if error is raised when invalid layout is given

        with pytest.raises(ValueError, match=msg):
            hist_df.plot.hist(column=["A", "B"], by=by, layout=layout)

    @pytest.mark.slow
    def test_axis_share_x_with_by(self, hist_df):
        """``sharex=True`` joins x-axes only; y-axes stay independent."""
        # GH 15079
        ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True)

        # share x
        assert self.get_x_axis(ax1).joined(ax1, ax2)
        assert self.get_x_axis(ax2).joined(ax1, ax2)
        assert self.get_x_axis(ax3).joined(ax1, ax3)
        assert self.get_x_axis(ax3).joined(ax2, ax3)

        # don't share y
        assert not self.get_y_axis(ax1).joined(ax1, ax2)
        assert not self.get_y_axis(ax2).joined(ax1, ax2)
        assert not self.get_y_axis(ax3).joined(ax1, ax3)
        assert not self.get_y_axis(ax3).joined(ax2, ax3)

    @pytest.mark.slow
    def test_axis_share_y_with_by(self, hist_df):
        """``sharey=True`` joins y-axes only; x-axes stay independent."""
        # GH 15079
        ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True)

        # share y
        assert self.get_y_axis(ax1).joined(ax1, ax2)
        assert self.get_y_axis(ax2).joined(ax1, ax2)
        assert self.get_y_axis(ax3).joined(ax1, ax3)
        assert self.get_y_axis(ax3).joined(ax2, ax3)

        # don't share x
        assert not self.get_x_axis(ax1).joined(ax1, ax2)
        assert not self.get_x_axis(ax2).joined(ax1, ax2)
        assert not self.get_x_axis(ax3).joined(ax1, ax3)
        assert not self.get_x_axis(ax3).joined(ax2, ax3)

    @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
    def test_figure_shape_hist_with_by(self, figsize, hist_df):
        """``figsize`` is honored when plotting grouped histograms."""
        # GH 15079
        axes = hist_df.plot.hist(column="A", by="C", figsize=figsize)
        self._check_axes_shape(axes, axes_num=3, figsize=figsize)
@td.skip_if_no_mpl
class TestBoxWithBy(TestPlotBase):
    """Tests for ``DataFrame.plot.box`` with the ``by`` argument (GH 15079)."""

    @pytest.mark.parametrize(
        "by, column, titles, xticklabels",
        [
            ("C", "A", ["A"], [["a", "b", "c"]]),
            (
                ["C", "D"],
                "A",
                ["A"],
                [
                    [
                        "(a, a)",
                        "(a, b)",
                        "(a, c)",
                        "(b, a)",
                        "(b, b)",
                        "(b, c)",
                        "(c, a)",
                        "(c, b)",
                        "(c, c)",
                    ]
                ],
            ),
            ("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2),
            (
                ["C", "D"],
                ["A", "B"],
                ["A", "B"],
                [
                    [
                        "(a, a)",
                        "(a, b)",
                        "(a, c)",
                        "(b, a)",
                        "(b, b)",
                        "(b, c)",
                        "(c, a)",
                        "(c, b)",
                        "(c, c)",
                    ]
                ]
                * 2,
            ),
            (["C"], None, ["A", "B"], [["a", "b", "c"]] * 2),
        ],
    )
    def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df):
        """Grouping by one or two columns sets titles and x tick labels."""
        # GH 15079
        axes = _check_plot_works(hist_df.plot.box, column=column, by=by)
        result_titles = [ax.get_title() for ax in axes]
        result_xticklabels = [
            [label.get_text() for label in ax.get_xticklabels()] for ax in axes
        ]

        assert result_xticklabels == xticklabels
        assert result_titles == titles

    @pytest.mark.parametrize(
        "by, column, titles, xticklabels",
        [
            (0, "A", ["A"], [["a", "b", "c"]]),
            (
                [0, "D"],
                "A",
                ["A"],
                [
                    [
                        "(a, a)",
                        "(a, b)",
                        "(a, c)",
                        "(b, a)",
                        "(b, b)",
                        "(b, c)",
                        "(c, a)",
                        "(c, b)",
                        "(c, c)",
                    ]
                ],
            ),
            (0, None, ["A", "B"], [["a", "b", "c"]] * 2),
        ],
    )
    def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df):
        """``by`` also accepts the integer column label 0."""
        # GH 15079
        df = hist_df.copy()
        df = df.rename(columns={"C": 0})
        axes = _check_plot_works(df.plot.box, column=column, by=by)
        result_titles = [ax.get_title() for ax in axes]
        result_xticklabels = [
            [label.get_text() for label in ax.get_xticklabels()] for ax in axes
        ]

        assert result_xticklabels == xticklabels
        assert result_titles == titles

    @pytest.mark.parametrize(
        "by, column",
        [
            ([], ["A"]),
            ((), "A"),
            ([], None),
            ((), ["A", "B"]),
        ],
    )
    def test_box_plot_with_none_empty_list_by(self, by, column, hist_df):
        """An empty ``by`` (list or tuple) must raise ValueError."""
        # GH 15079
        msg = "No group keys passed"
        with pytest.raises(ValueError, match=msg):
            _check_plot_works(hist_df.plot.box, column=column, by=by)

    @pytest.mark.slow
    @pytest.mark.parametrize(
        "by, column, layout, axes_num",
        [
            (["C"], "A", (1, 1), 1),
            ("C", "A", (1, 1), 1),
            ("C", None, (2, 1), 2),
            ("C", ["A", "B"], (1, 2), 2),
            (["C", "D"], "A", (1, 1), 1),
            (["C", "D"], None, (1, 2), 2),
        ],
    )
    def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
        """Explicit ``layout`` shapes the grid of grouped box plots."""
        # GH 15079
        axes = _check_plot_works(hist_df.plot.box, column=column, by=by, layout=layout)
        self._check_axes_shape(axes, axes_num=axes_num, layout=layout)

    @pytest.mark.parametrize(
        "msg, by, layout",
        [
            ("larger than required size", ["C", "D"], (1, 1)),
            (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
            ("At least one dimension of layout must be positive", "C", (-1, -1)),
        ],
    )
    def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
        """Invalid ``layout`` values raise ValueError with a clear message."""
        # GH 15079, test if error is raised when invalid layout is given

        with pytest.raises(ValueError, match=msg):
            hist_df.plot.box(column=["A", "B"], by=by, layout=layout)

    @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
    def test_figure_shape_hist_with_by(self, figsize, hist_df):
        """``figsize`` is honored when plotting grouped box plots.

        NOTE(review): the name says "hist" but this exercises ``plot.box`` —
        likely a copy-paste from TestHistWithBy; renaming would change the
        collected test id, so only flagged here.
        """
        # GH 15079
        axes = hist_df.plot.box(column="A", by="C", figsize=figsize)
        self._check_axes_shape(axes, axes_num=1, figsize=figsize)
| 32.276596
| 87
| 0.409278
| 1,402
| 12,136
| 3.381598
| 0.101284
| 0.020671
| 0.009492
| 0.038389
| 0.87471
| 0.853828
| 0.82683
| 0.777684
| 0.744147
| 0.618857
| 0
| 0.033319
| 0.413893
| 12,136
| 375
| 88
| 32.362667
| 0.633207
| 0.027769
| 0
| 0.55418
| 0
| 0
| 0.089744
| 0
| 0
| 0
| 0
| 0
| 0.077399
| 1
| 0.04644
| false
| 0.006192
| 0.021672
| 0
| 0.077399
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe4037c4dd48feaccb9c70bf626144a4f178a4e4
| 46
|
py
|
Python
|
gym_panda/envs/__init__.py
|
a-nooj/gym-panda
|
92c769fcd4d910b5913f99f96aa7cb1231a9d069
|
[
"MIT"
] | 53
|
2020-05-29T18:47:25.000Z
|
2022-02-16T16:00:29.000Z
|
gym_panda/envs/__init__.py
|
borninfreedom/gym-panda
|
92c769fcd4d910b5913f99f96aa7cb1231a9d069
|
[
"MIT"
] | 4
|
2020-06-15T18:24:03.000Z
|
2021-08-08T16:44:30.000Z
|
gym_panda/envs/__init__.py
|
borninfreedom/gym-panda
|
92c769fcd4d910b5913f99f96aa7cb1231a9d069
|
[
"MIT"
] | 18
|
2020-05-08T09:57:48.000Z
|
2022-01-15T16:20:11.000Z
|
from gym_panda.envs.panda_env import PandaEnv
| 23
| 45
| 0.869565
| 8
| 46
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe77f6830b185d45af18d65abb0fc08976d27e2c
| 11,264
|
py
|
Python
|
tests/test_lpqpydist.py
|
StongeEtienne/lpq-nanoflann
|
5e4df9c86729323ba69faff81defa657c1c9c941
|
[
"BSD-2-Clause"
] | 2
|
2021-12-16T21:24:06.000Z
|
2022-03-29T15:34:29.000Z
|
tests/test_lpqpydist.py
|
StongeEtienne/lpq-nanoflann
|
5e4df9c86729323ba69faff81defa657c1c9c941
|
[
"BSD-2-Clause"
] | 1
|
2021-12-17T15:04:40.000Z
|
2021-12-17T21:05:37.000Z
|
tests/test_lpqpydist.py
|
StongeEtienne/lpq-nanoflann
|
5e4df9c86729323ba69faff81defa657c1c9c941
|
[
"BSD-2-Clause"
] | 1
|
2021-12-16T21:48:08.000Z
|
2021-12-16T21:48:08.000Z
|
import pytest
import numpy as np
import lpqtree.lpqpydist as lpqdist
# testing values
# `a` has shape (2, 3, 4): 2 groups of 3 vectors of length 4, so a norm
# taken along the last axis yields a (2, 3) result.
a = np.array([[[2.0, 2.0, -2.0, 2.0],
               [0.0, 0.0, 0.0, 0.0],
               [1.0, 0.0, 0.0, 0.0]],
              [[-4.0, 0.0, 0.0, 0.0],
               [0.0, -1.0, 1.0, 0.0],
               [1.0, -2.0, 2.0, 4.0]]])
# Hand-computed L1 / L2 norms of each length-4 vector in `a` (last axis).
l1_a = np.array([[8.0, 0.0, 1.0], [4.0, 2.0, 9.0]])
l2_a = np.array([[4.0, 0.0, 1.0], [4.0, np.sqrt(2.0), 5.0]])

# scaling factor
# Irrational and > 1, used to verify homogeneity of the norms.
sc = np.pi
def test_l1():
    """lpqdist.l1 matches hand-computed L1 norms and np.linalg.norm."""
    cases = (
        (a, l1_a, "L1 test failed"),
        (a * 0.0, l1_a * 0.0, "L1 zero failed"),
        (a * sc, l1_a * sc, "L1 scaling failed"),
        (-a, l1_a, "L1 negative failed"),
    )
    for arr, expected, message in cases:
        assert np.allclose(lpqdist.l1(arr), expected), message
    # Cross-check against the reference NumPy implementation.
    assert np.allclose(lpqdist.l1(a), np.linalg.norm(a, 1, axis=-1)), "L1 linalg failed"
def test_l2():
    """lpqdist.l2 matches hand-computed L2 norms and np.linalg.norm."""
    assert np.allclose(lpqdist.l2(a), l2_a), "L2 test failed"
    assert np.allclose(lpqdist.l2(a*0.0), l2_a*0.0), "L2 zero failed"
    assert np.allclose(lpqdist.l2(a*sc), l2_a*sc), "L2 scaling failed"
    # Fixed copy-paste bug: the next two assertion messages said "L1".
    assert np.allclose(lpqdist.l2(-a), l2_a), "L2 negative failed"
    assert np.allclose(lpqdist.l2(a), np.linalg.norm(a, 2, axis=-1)), "L2 linalg failed"
def test_lp():
    """lpqdist.lp agrees with the l1/l2 shortcuts and with np.linalg.norm."""
    shortcut_cases = (
        (a, 1, lpqdist.l1(a), "Lp, p=1 test failed"),
        (a * 0.0, 1, lpqdist.l1(a * 0.0), "Lp, p=1 zero failed"),
        (a * sc, 1, lpqdist.l1(a * sc), "Lp, p=1 scaling failed"),
        (-a, 1, lpqdist.l1(a), "Lp, p=1 negative failed"),
        (a, 2, lpqdist.l2(a), "Lp, p=2 test failed"),
        (a * 0.0, 2, lpqdist.l2(a * 0.0), "Lp, p=2 zero failed"),
        (a * sc, 2, lpqdist.l2(a * sc), "Lp, p=2 scaling failed"),
        (-a, 2, lpqdist.l2(a), "Lp, p=2 negative failed"),
    )
    for arr, p, expected, message in shortcut_cases:
        assert np.allclose(lpqdist.lp(arr, p=p), expected), message

    # Sweep p against NumPy's reference norm.
    for p in range(1, 100):
        reference = np.linalg.norm(a, p, axis=-1)
        assert np.allclose(lpqdist.lp(a, p=p), reference), "Lp linalg test failed"
        assert np.allclose(lpqdist.lp(a * 0.0, p=p), reference * 0.0), "Lp linalg zero failed"
        assert np.allclose(lpqdist.lp(a * sc, p=p), reference * sc), "Lp linalg scaling failed"
        assert np.allclose(lpqdist.lp(-a, p=p), reference), "Lp linalg negative failed"
def test_l11():
    """lpqdist.l11 equals l1 composed with itself."""
    reference = lpqdist.l1(lpqdist.l1(a))
    assert np.allclose(lpqdist.l11(a), reference), "L11 test failed"
    assert np.allclose(lpqdist.l11(a * 0.0), reference * 0.0), "L11 zero failed"
    assert np.allclose(lpqdist.l11(a * sc), reference * sc), "L11 scaling failed"
    assert np.allclose(lpqdist.l11(-a), reference), "L11 negative failed"
def test_l12():
    """lpqdist.l12 equals l2 applied to the per-vector l1 norms."""
    reference = lpqdist.l2(lpqdist.l1(a))
    assert np.allclose(lpqdist.l12(a), reference), "L12 test failed"
    assert np.allclose(lpqdist.l12(a * 0.0), reference * 0.0), "L12 zero failed"
    assert np.allclose(lpqdist.l12(a * sc), reference * sc), "L12 scaling failed"
    assert np.allclose(lpqdist.l12(-a), reference), "L12 negative failed"
def test_l21():
    """lpqdist.l21 equals l1 applied to the per-vector l2 norms."""
    reference = lpqdist.l1(lpqdist.l2(a))
    assert np.allclose(lpqdist.l21(a), reference), "L21 test failed"
    assert np.allclose(lpqdist.l21(a * 0.0), reference * 0.0), "L21 zero failed"
    assert np.allclose(lpqdist.l21(a * sc), reference * sc), "L21 scaling failed"
    assert np.allclose(lpqdist.l21(-a), reference), "L21 negative failed"
def test_l22():
    """lpqdist.l22 equals l2 composed with itself."""
    reference = lpqdist.l2(lpqdist.l2(a))
    assert np.allclose(lpqdist.l22(a), reference), "L22 test failed"
    assert np.allclose(lpqdist.l22(a * 0.0), reference * 0.0), "L22 zero failed"
    assert np.allclose(lpqdist.l22(a * sc), reference * sc), "L22 scaling failed"
    assert np.allclose(lpqdist.l22(-a), reference), "L22 negative failed"
def test_lp1():
    """lpqdist.lp1 matches the l11/l21 shortcuts and nested linalg norms."""
    assert np.allclose(lpqdist.lp1(a, p=1), lpqdist.l11(a)), "Lp1, p=1 test failed"
    assert np.allclose(lpqdist.lp1(a, p=2), lpqdist.l21(a)), "Lp1, p=2 test failed"

    # Sweep p against the nested NumPy reference (inner p-norm, outer 1-norm).
    for p in range(1, 100):
        reference = np.linalg.norm(np.linalg.norm(a, p, axis=-1), 1, axis=-1)
        for arr, expected, message in (
            (a, reference, "Lp1 linalg test failed"),
            (a * 0.0, reference * 0.0, "Lp1 linalg zero failed"),
            (a * sc, reference * sc, "Lp1 linalg scaling failed"),
            (-a, reference, "Lp1 linalg negative failed"),
        ):
            assert np.allclose(lpqdist.lp1(arr, p=p), expected), message
def test_lp2():
    """lpqdist.lp2 matches the l12/l22 shortcuts and nested linalg norms."""
    assert np.allclose(lpqdist.lp2(a, p=1), lpqdist.l12(a)), "Lp2, p=1 test failed"
    assert np.allclose(lpqdist.lp2(a, p=2), lpqdist.l22(a)), "Lp2, p=2 test failed"

    # Sweep p against the nested NumPy reference (inner p-norm, outer 2-norm).
    for p in range(1, 100):
        reference = np.linalg.norm(np.linalg.norm(a, p, axis=-1), 2, axis=-1)
        for arr, expected, message in (
            (a, reference, "Lp2 linalg test failed"),
            (a * 0.0, reference * 0.0, "Lp2 linalg zero failed"),
            (a * sc, reference * sc, "Lp2 linalg scaling failed"),
            (-a, reference, "Lp2 linalg negative failed"),
        ):
            assert np.allclose(lpqdist.lp2(arr, p=p), expected), message
def test_l1q():
    """L1q norm: inner l_1 norm followed by an outer l_q norm.

    Verifies the q=1 and q=2 specialisations, then compares against numpy's
    nested linalg.norm for a range of q, together with the zero, scaling,
    and negation properties.
    """
    assert np.allclose(lpqdist.l1q(a, q=1), lpqdist.l1(lpqdist.l1(a))), "L1q, q=1 test failed"
    assert np.allclose(lpqdist.l1q(a, q=2), lpqdist.l12(a)), "L1q, q=2 test failed"
    for order in range(1, 100):
        reference = np.linalg.norm(np.linalg.norm(a, 1, axis=-1), order, axis=-1)
        assert np.allclose(lpqdist.l1q(a, q=order), reference), "L1q linalg test failed"
        assert np.allclose(lpqdist.l1q(a * 0.0, q=order), reference * 0.0), "L1q linalg zero failed"
        assert np.allclose(lpqdist.l1q(a * sc, q=order), reference * sc), "L1q linalg scaling failed"
        assert np.allclose(lpqdist.l1q(-a, q=order), reference), "L1q linalg negative failed"
def test_l2q():
    """L2q norm: inner l_2 norm followed by an outer l_q norm.

    Verifies the q=1 and q=2 specialisations, then compares against numpy's
    nested linalg.norm for a range of q, together with the zero, scaling,
    and negation properties.
    """
    # Fixed: the first assertion exercises q=1 but its message said "q=2"
    # (copy-paste from the line below), which would misreport a failure.
    assert np.allclose(lpqdist.l2q(a, q=1), lpqdist.l1(lpqdist.l2(a))), "L2q, q=1 test failed"
    assert np.allclose(lpqdist.l2q(a, q=2), lpqdist.l22(a)), "L2q, q=2 test failed"
    for q in range(1, 100):
        linalg_res = np.linalg.norm(np.linalg.norm(a, 2, axis=-1), q, axis=-1)
        assert np.allclose(lpqdist.l2q(a, q=q), linalg_res), "L2q linalg test failed"
        assert np.allclose(lpqdist.l2q(a * 0.0, q=q), linalg_res * 0.0), "L2q linalg zero failed"
        assert np.allclose(lpqdist.l2q(a * sc, q=q), linalg_res * sc), "L2q linalg scaling failed"
        assert np.allclose(lpqdist.l2q(-a, q=q), linalg_res), "L2q linalg negative failed"
def test_lpq():
    """Lpq norm: inner l_p norm followed by an outer l_q norm.

    Verifies special cases against the dedicated l21/l11/l22 helpers, then
    compares against numpy's nested linalg.norm over a (p, q) grid, together
    with the zero, scaling, and negation properties.
    """
    assert np.allclose(lpqdist.lpq(a, p=2, q=1), lpqdist.l21(a)), "Lpq, p=2 q=1 test failed"
    assert np.allclose(lpqdist.lpq(a, p=1, q=1), lpqdist.l1(lpqdist.l1(a))), "Lpq, p=1 q=1 test failed"
    assert np.allclose(lpqdist.lpq(a, p=2, q=2), lpqdist.l2(lpqdist.l2(a))), "Lpq, p=2 q=2 test failed"
    for p in range(1, 100):
        for q in range(1, 100):
            linalg_res = np.linalg.norm(np.linalg.norm(a, p, axis=-1), q, axis=-1)
            # Fixed: messages said "Lp linalg ..." (copy-paste from another
            # test) although this loop exercises lpq.
            assert np.allclose(lpqdist.lpq(a, p=p, q=q), linalg_res), "Lpq linalg test failed"
            assert np.allclose(lpqdist.lpq(a * 0.0, p=p, q=q), linalg_res * 0.0), "Lpq linalg zero failed"
            assert np.allclose(lpqdist.lpq(a * sc, p=p, q=q), linalg_res * sc), "Lpq linalg scaling failed"
            assert np.allclose(lpqdist.lpq(-a, p=p, q=q), linalg_res), "Lpq linalg negative failed"
def test_l1m():
    """L1m: mean of per-row l1 norms, i.e. l11 divided by the row count.

    Checks the l11/shape relation plus the zero, scaling, and negation
    properties.
    """
    # Fixed: every message said "L2m" (copy-paste from test_l2m) and the
    # negation check was mislabelled "zero failed".
    assert np.allclose(lpqdist.l1m(a), lpqdist.l11(a) / a.shape[-2]), "L1m test failed"
    assert np.allclose(lpqdist.l1m(a * 0.0), lpqdist.l1m(a) * 0.0), "L1m zero failed"
    assert np.allclose(lpqdist.l1m(a * sc), lpqdist.l1m(a) * sc), "L1m scaling failed"
    assert np.allclose(lpqdist.l1m(-a), lpqdist.l1m(a)), "L1m negative failed"
def test_l2m():
    """L2m: mean of per-row l2 norms, i.e. l21 divided by the row count.

    Checks the l21/shape relation plus the zero, scaling, and negation
    properties.
    """
    assert np.allclose(lpqdist.l2m(a), lpqdist.l21(a) / a.shape[-2]), "L2m test failed"
    assert np.allclose(lpqdist.l2m(a * 0.0), lpqdist.l2m(a) * 0.0), "L2m zero failed"
    assert np.allclose(lpqdist.l2m(a * sc), lpqdist.l2m(a) * sc), "L2m scaling failed"
    # Fixed: this negation check was mislabelled "L2m zero failed".
    assert np.allclose(lpqdist.l2m(-a), lpqdist.l2m(a)), "L2m negative failed"
def test_lpm():
    """Lpm: mean over the last axis of per-row l_p norms.

    Verifies the p=1 and p=2 specialisations, then compares against
    np.mean over numpy's linalg.norm for a range of p, together with the
    zero, scaling, and negation properties.
    """
    assert np.allclose(lpqdist.lpm(a, p=2), lpqdist.l21(a) / a.shape[-2]), "Lpm, p=2 test failed"
    assert np.allclose(lpqdist.lpm(a, p=1), lpqdist.l1(lpqdist.l1(a)) / a.shape[-2]), "Lpm, p=1 test failed"
    for order in range(1, 100):
        reference = np.mean(np.linalg.norm(a, order, axis=-1), axis=-1)
        assert np.allclose(lpqdist.lpm(a, p=order), reference), "Lpm linalg test failed"
        assert np.allclose(lpqdist.lpm(a * 0.0, p=order), reference * 0.0), "Lpm linalg zero failed"
        assert np.allclose(lpqdist.lpm(a * sc, p=order), reference * sc), "Lpm linalg scaling failed"
        assert np.allclose(lpqdist.lpm(-a, p=order), reference), "Lpm linalg negative failed"
def test_lpq_switch():
    """lpq_switch dispatches to the specialised norm implementations.

    Checks every fixed (p, q) specialisation, the q="m" mean variant, the
    single-parameter sweeps, and finally a full (p, q) grid against lpq,
    including the zero, scaling, and negation properties.
    """
    assert np.allclose(lpqdist.lpq_switch(a, p=1, q=1), lpqdist.l11(a)), "Lpq_switch, p=1 q=1 test failed"
    assert np.allclose(lpqdist.lpq_switch(a, p=1, q=2), lpqdist.l12(a)), "Lpq_switch, p=1 q=2 test failed"
    assert np.allclose(lpqdist.lpq_switch(a, p=2, q=1), lpqdist.l21(a)), "Lpq_switch, p=2 q=1 test failed"
    assert np.allclose(lpqdist.lpq_switch(a, p=2, q=2), lpqdist.l22(a)), "Lpq_switch, p=2 q=2 test failed"
    assert np.allclose(lpqdist.lpq_switch(a, p=1, q="m"), lpqdist.l1m(a)), "Lpq_switch, p=1 mean test failed"
    assert np.allclose(lpqdist.lpq_switch(a, p=2, q="m"), lpqdist.l2m(a)), "Lpq_switch, p=2 mean test failed"
    for p in range(1, 100):
        assert np.allclose(lpqdist.lpq_switch(a, p=p, q="m"), lpqdist.lpm(a, p=p)), "Lpq_switch, mean test failed"
        # Fixed: the q=1/q=2 asserts below were labelled "p=1"/"p=2" and the
        # p=1/p=2 asserts further down were labelled "q=1"/"q=2" — the
        # labels were swapped between the two loops.
        assert np.allclose(lpqdist.lpq_switch(a, p=p, q=1), lpqdist.lp1(a, p=p)), "Lpq_switch, q=1 test failed"
        assert np.allclose(lpqdist.lpq_switch(a, p=p, q=2), lpqdist.lp2(a, p=p)), "Lpq_switch, q=2 test failed"
    for q in range(1, 100):
        assert np.allclose(lpqdist.lpq_switch(a, p=1, q=q), lpqdist.l1q(a, q=q)), "Lpq_switch, p=1 test failed"
        assert np.allclose(lpqdist.lpq_switch(a, p=2, q=q), lpqdist.l2q(a, q=q)), "Lpq_switch, p=2 test failed"
    for p in range(1, 100):
        for q in range(1, 100):
            lpq_res = lpqdist.lpq(a, p=p, q=q)
            assert np.allclose(lpqdist.lpq_switch(a, p=p, q=q), lpq_res), "Lpq_switch test failed"
            assert np.allclose(lpqdist.lpq_switch(a * 0.0, p=p, q=q), lpq_res * 0.0), "Lpq_switch zero failed"
            assert np.allclose(lpqdist.lpq_switch(a * sc, p=p, q=q), lpq_res * sc), "Lpq_switch scaling failed"
            assert np.allclose(lpqdist.lpq_switch(-a, p=p, q=q), lpq_res), "Lpq_switch negative failed"
def test_lpq_str_switch():
    """lpq_str_switch parses norm names like "l21" or "l2m" and dispatches.

    Builds the string for each (p, q) and (p, mean) combination and checks
    the result against the explicit lpm/lpq calls, including the zero,
    scaling, and negation properties.
    """
    for p in range(1, 10):
        norm_str = "l" + str(p) + "m"
        # Fixed: messages referred to "Lpm_switch"/"Lpq_switch" although
        # this test exercises lpq_str_switch.
        assert np.allclose(lpqdist.lpq_str_switch(a, norm=norm_str), lpqdist.lpm(a, p=p)), "Lpq_str_switch mean test failed"
        for q in range(1, 10):
            norm_str = "l" + str(p) + str(q)
            lpq_res = lpqdist.lpq(a, p=p, q=q)
            assert np.allclose(lpqdist.lpq_str_switch(a, norm=norm_str), lpq_res), "Lpq_str_switch test failed"
            assert np.allclose(lpqdist.lpq_str_switch(a * 0.0, norm=norm_str), lpq_res * 0.0), "Lpq_str_switch zero failed"
            assert np.allclose(lpqdist.lpq_str_switch(a * sc, norm=norm_str), lpq_res * sc), "Lpq_str_switch scaling failed"
            assert np.allclose(lpqdist.lpq_str_switch(-a, norm=norm_str), lpq_res), "Lpq_str_switch negative failed"
| 52.635514
| 116
| 0.645685
| 2,068
| 11,264
| 3.445841
| 0.030464
| 0.115633
| 0.231266
| 0.332445
| 0.910328
| 0.856301
| 0.793292
| 0.612826
| 0.526944
| 0.427168
| 0
| 0.062817
| 0.176048
| 11,264
| 213
| 117
| 52.882629
| 0.704989
| 0.002575
| 0
| 0.086957
| 0
| 0
| 0.19489
| 0
| 0
| 0
| 0
| 0
| 0.639752
| 1
| 0.10559
| false
| 0
| 0.018634
| 0
| 0.124224
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
229f10bb331d6890360919536677501b81f65dba
| 39
|
py
|
Python
|
Password_Protect.py
|
Alitma5094/Password-Protect
|
8278163051146b6b8f3dcb017867d954899e5209
|
[
"MIT"
] | null | null | null |
Password_Protect.py
|
Alitma5094/Password-Protect
|
8278163051146b6b8f3dcb017867d954899e5209
|
[
"MIT"
] | null | null | null |
Password_Protect.py
|
Alitma5094/Password-Protect
|
8278163051146b6b8f3dcb017867d954899e5209
|
[
"MIT"
] | null | null | null |
# Entry point for the Password Protect application: delegates to the CLI.
from cli import start_cli

# Fixed: start_cli() ran unconditionally at import time, so merely importing
# this module (e.g. from tests or tooling) would launch the CLI. The standard
# __main__ guard keeps script behavior identical while making import safe.
if __name__ == "__main__":
    start_cli()
| 9.75
| 25
| 0.794872
| 7
| 39
| 4.142857
| 0.571429
| 0.551724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 39
| 3
| 26
| 13
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
22ae407a1a844a391053cf26f9ce2822d17d6b5c
| 161,952
|
py
|
Python
|
libcloud/test/compute/test_dimensiondata_v2_4.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | null | null | null |
libcloud/test/compute/test_dimensiondata_v2_4.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | null | null | null |
libcloud/test/compute/test_dimensiondata_v2_4.py
|
dupontz/libcloud
|
419c69441ea10e7bbf37319e5e8d02e82e7e6b40
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
import sys
from types import GeneratorType
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataAPIException, NetworkDomainServicePlan
from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification, DimensionDataServerDisk, DimensionDataServerVMWareTools
from libcloud.common.dimensiondata import DimensionDataTag, DimensionDataTagKey
from libcloud.common.dimensiondata import DimensionDataIpAddress, \
DimensionDataIpAddressList, DimensionDataChildIpAddressList, \
DimensionDataPortList, DimensionDataPort, DimensionDataChildPortList
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
from libcloud.compute.drivers.dimensiondata import DimensionDataNic
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp, unittest, MockRawResponse, StorageMockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
from libcloud.utils.xml import fixxpath, findtext, findall
class DimensionData_v2_4_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
DimensionData.connectionCls.active_api_version = '2.4'
DimensionData.connectionCls.conn_class = DimensionDataMockHttp
DimensionData.connectionCls.rawResponseCls = \
DimensionDataMockRawResponse
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
def test_invalid_creds(self):
DimensionDataMockHttp.type = 'UNAUTHORIZED'
with self.assertRaises(InvalidCredsError):
self.driver.list_nodes()
def test_get_account_details(self):
DimensionDataMockHttp.type = None
ret = self.driver.connection.get_account_details()
self.assertEqual(ret.full_name, 'Test User')
self.assertEqual(ret.first_name, 'Test')
self.assertEqual(ret.email, 'test@example.com')
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_loc = ret[0]
self.assertEqual(first_loc.id, 'NA3')
self.assertEqual(first_loc.name, 'US - West')
self.assertEqual(first_loc.country, 'US')
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 7)
def test_node_extras(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(isinstance(ret[0].extra['vmWareTools'], DimensionDataServerVMWareTools))
self.assertTrue(isinstance(ret[0].extra['cpu'], DimensionDataServerCpuSpecification))
self.assertTrue(isinstance(ret[0].extra['disks'], list))
self.assertTrue(isinstance(ret[0].extra['disks'][0], DimensionDataServerDisk))
self.assertEqual(ret[0].extra['disks'][0].size_gb, 10)
self.assertTrue(isinstance(ret[1].extra['disks'], list))
self.assertTrue(isinstance(ret[1].extra['disks'][0], DimensionDataServerDisk))
self.assertEqual(ret[1].extra['disks'][0].size_gb, 10)
def test_server_states(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(ret[0].state == 'running')
self.assertTrue(ret[1].state == 'starting')
self.assertTrue(ret[2].state == 'stopping')
self.assertTrue(ret[3].state == 'reconfiguring')
self.assertTrue(ret[4].state == 'running')
self.assertTrue(ret[5].state == 'terminated')
self.assertTrue(ret[6].state == 'stopped')
self.assertEqual(len(ret), 7)
def test_list_nodes_response_PAGINATED(self):
DimensionDataMockHttp.type = 'PAGINATED'
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 9)
def test_paginated_mcp2_call_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = 'EMPTY'
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server')
empty_node_list = []
for node_list in node_list_generator:
empty_node_list.extend(node_list)
self.assertTrue(len(empty_node_list) == 0)
def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = 'PAGED_THEN_EMPTY'
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server')
final_node_list = []
for node_list in node_list_generator:
final_node_list.extend(node_list)
self.assertTrue(len(final_node_list) == 2)
def test_paginated_mcp2_call_with_page_size(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = 'PAGESIZE50'
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server', page_size=50)
self.assertTrue(isinstance(node_list_generator, GeneratorType))
# We're making sure here the filters make it to the URL
# See _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts
def test_list_nodes_response_strings_ALLFILTERS(self):
DimensionDataMockHttp.type = 'ALLFILTERS'
ret = self.driver.list_nodes(ex_location='fake_loc', ex_name='fake_name',
ex_ipv6='fake_ipv6', ex_ipv4='fake_ipv4', ex_vlan='fake_vlan',
ex_image='fake_image', ex_deployed=True,
ex_started=True, ex_state='fake_state',
ex_network='fake_network', ex_network_domain='fake_network_domain')
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 7)
node = ret[3]
self.assertTrue(isinstance(node.extra['disks'], list))
self.assertTrue(isinstance(node.extra['disks'][0], DimensionDataServerDisk))
self.assertEqual(node.size.id, '1')
self.assertEqual(node.image.id, '3ebf3c0f-90fe-4a8b-8585-6e65b316592c')
self.assertEqual(node.image.name, 'WIN2008S/32')
disk = node.extra['disks'][0]
self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a")
self.assertEqual(disk.scsi_id, 0)
self.assertEqual(disk.size_gb, 50)
self.assertEqual(disk.speed, "STANDARD")
self.assertEqual(disk.state, "NORMAL")
def test_list_nodes_response_LOCATION(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
first_loc = ret[0]
ret = self.driver.list_nodes(ex_location=first_loc)
for node in ret:
self.assertEqual(node.extra['datacenterId'], 'NA3')
def test_list_nodes_response_LOCATION_STR(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes(ex_location='NA3')
for node in ret:
self.assertEqual(node.extra['datacenterId'], 'NA3')
def test_list_sizes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
size = ret[0]
self.assertEqual(size.name, 'default')
def test_reboot_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.reboot()
self.assertTrue(ret is True)
def test_reboot_node_response_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
with self.assertRaises(DimensionDataAPIException):
node.reboot()
def test_destroy_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.destroy()
self.assertTrue(ret is True)
def test_destroy_node_response_RESOURCE_BUSY(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
with self.assertRaises(DimensionDataAPIException):
node.destroy()
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, 'RedHat 6 64-bit 2 CPU')
self.assertEqual(images[0].id, 'c14b1a46-2428-44c1-9c1a-b20e6418d08c')
self.assertEqual(images[0].extra['location'].id, 'NA9')
self.assertEqual(images[0].extra['cpu'].cpu_count, 2)
self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64')
def test_clean_failed_deployment_response_with_node(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_clean_failed_deployment_response_with_node_id(self):
node = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_ex_list_customer_images(self):
images = self.driver.ex_list_customer_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, 'ImportedCustomerImage')
self.assertEqual(images[0].id, '5234e5c7-01de-4411-8b6e-baeb8d91cf5d')
self.assertEqual(images[0].extra['location'].id, 'NA9')
self.assertEqual(images[0].extra['cpu'].cpu_count, 4)
self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64')
def test_create_mcp1_node_optional_param(self):
root_pw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4',
cores_per_socket='2',
performance='STANDARD')
disks = [DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')]
node = self.driver.create_node(name='test2', image=image, auth=root_pw,
ex_description='test2 node',
ex_network=network,
ex_is_started=False,
ex_memory_gb=8,
ex_disks=disks,
ex_cpu_specification=cpu_spec,
ex_primary_dns='10.0.0.5',
ex_secondary_dns='10.0.0.6'
)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_mcp1_node_response_no_pass_random_gen(self):
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=None,
ex_description='test2 node',
ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
self.assertTrue('password' in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows(self):
image = self.driver.ex_list_customer_images()[1]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=None,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
self.assertTrue('password' in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows_STR(self):
image = self.driver.ex_list_customer_images()[1].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=None,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
self.assertTrue('password' in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux(self):
image = self.driver.ex_list_customer_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=None,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
self.assertTrue('password' not in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux_STR(self):
image = self.driver.ex_list_customer_images()[0].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=None,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
self.assertTrue('password' not in node.extra)
def test_create_mcp1_node_response_STR(self):
rootPw = 'pass123'
image = self.driver.list_images()[0].id
network = self.driver.ex_list_networks()[0].id
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_response_network_domain(self):
rootPw = NodeAuthPassword('pass123')
location = self.driver.ex_get_location_by_id('NA9')
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0]
vlan = self.driver.ex_list_vlans(location=location)[0]
cpu = DimensionDataServerCpuSpecification(
cpu_count=4,
cores_per_socket=1,
performance='HIGHPERFORMANCE'
)
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node',
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False, ex_cpu_specification=cpu,
ex_memory_gb=4)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_response_network_domain_STR(self):
rootPw = NodeAuthPassword('pass123')
location = self.driver.ex_get_location_by_id('NA9')
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0].id
vlan = self.driver.ex_list_vlans(location=location)[0].id
cpu = DimensionDataServerCpuSpecification(
cpu_count=4,
cores_per_socket=1,
performance='HIGHPERFORMANCE'
)
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node',
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False, ex_cpu_specification=cpu,
ex_memory_gb=4)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_mcp1_node_no_network(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(InvalidRequestError):
self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network=None,
ex_is_started=False)
def test_create_node_mcp1_ipv4(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network='fakenetwork',
ex_primary_ipv4='10.0.0.1',
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_mcp1_network(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network='fakenetwork',
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_mcp2_vlan(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_vlan='fakevlan',
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_mcp2_ipv4(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_ipv4='10.0.0.1',
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_network_domain_no_vlan_or_ipv4(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fake_network_domain',
ex_is_started=False)
def test_create_node_response(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_primary_nic_vlan='fakevlan'
)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_ms_time_zone(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_primary_nic_vlan='fakevlan',
ex_microsoft_time_zone='040'
)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_ambigious_mcps_fail(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_network='fakenetwork',
ex_primary_nic_vlan='fakevlan'
)
def test_create_node_no_network_domain_fail(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_primary_nic_vlan='fakevlan'
)
def test_create_node_no_primary_nic_fail(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain'
)
def test_create_node_primary_vlan_nic(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_primary_nic_vlan='fakevlan',
ex_primary_nic_network_adapter='v1000'
)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_primary_ipv4(self):
rootPw = 'pass123'
image = self.driver.list_images()[0]
node = self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1'
)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_both_primary_nic_and_vlan_fail(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name='test3',
image=image,
auth=rootPw,
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_primary_nic_vlan='fakevlan'
)
def test_create_node_cpu_specification(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4',
cores_per_socket='2',
performance='STANDARD')
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_is_started=False,
ex_cpu_specification=cpu_spec)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_memory(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_is_started=False,
ex_memory_gb=8)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_disks(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
disks = [DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_is_started=False,
ex_disks=disks)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_disks_fail(self):
root_pw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
disks = 'blah'
with self.assertRaises(TypeError):
self.driver.create_node(name='test2',
image=image,
auth=root_pw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_is_started=False,
ex_disks=disks)
def test_create_node_ipv4_gateway(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
node = self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_is_started=False,
ex_ipv4_gateway='10.2.2.2')
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_network_domain_no_vlan_no_ipv4_fail(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fake_network_domain',
ex_is_started=False)
def test_create_node_mcp2_additional_nics_legacy(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
additional_vlans = ['fakevlan1', 'fakevlan2']
additional_ipv4 = ['10.0.0.2', '10.0.0.3']
node = self.driver.create_node(
name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_ipv4='10.0.0.1',
ex_additional_nics_vlan=additional_vlans,
ex_additional_nics_ipv4=additional_ipv4,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_bad_additional_nics_ipv4(self):
rootPw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
with self.assertRaises(TypeError):
self.driver.create_node(name='test2',
image=image,
auth=rootPw,
ex_description='test2 node',
ex_network_domain='fake_network_domain',
ex_vlan='fake_vlan',
ex_additional_nics_ipv4='badstring',
ex_is_started=False)
def test_create_node_additional_nics(self):
root_pw = NodeAuthPassword('pass123')
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(vlan='fake_vlan',
network_adapter_name='v1000')
nic2 = DimensionDataNic(private_ip_v4='10.1.1.2',
network_adapter_name='v1000')
additional_nics = [nic1, nic2]
node = self.driver.create_node(name='test2',
image=image,
auth=root_pw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
ex_primary_nic_private_ipv4='10.0.0.1',
ex_additional_nics=additional_nics,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_additional_nics_vlan_ipv4_coexist_fail(self):
    """A NIC carrying both a VLAN and a private IPv4 address is rejected."""
    password = NodeAuthPassword('pass123')
    os_image = self.driver.list_images()[0]
    extra_nics = [
        DimensionDataNic(private_ip_v4='10.1.1.1', vlan='fake_vlan',
                         network_adapter_name='v1000'),
        DimensionDataNic(private_ip_v4='10.1.1.2', vlan='fake_vlan2',
                         network_adapter_name='v1000'),
    ]
    with self.assertRaises(ValueError):
        self.driver.create_node(name='test2',
                                image=os_image,
                                auth=password,
                                ex_description='test2 node',
                                ex_network_domain='fakenetworkdomain',
                                ex_primary_nic_private_ipv4='10.0.0.1',
                                ex_additional_nics=extra_nics,
                                ex_is_started=False)
def test_create_node_additional_nics_invalid_input_fail(self):
    """A non-list ex_additional_nics argument raises TypeError."""
    password = NodeAuthPassword('pass123')
    os_image = self.driver.list_images()[0]
    with self.assertRaises(TypeError):
        self.driver.create_node(name='test2',
                                image=os_image,
                                auth=password,
                                ex_description='test2 node',
                                ex_network_domain='fakenetworkdomain',
                                ex_primary_nic_private_ipv4='10.0.0.1',
                                ex_additional_nics='blah',
                                ex_is_started=False)
def test_create_node_additional_nics_vlan_ipv4_not_exist_fail(self):
    """Extra NICs without either a VLAN or an IPv4 address are rejected."""
    password = NodeAuthPassword('pass123')
    os_image = self.driver.list_images()[0]
    extra_nics = [DimensionDataNic(network_adapter_name='v1000'),
                  DimensionDataNic(network_adapter_name='v1000')]
    with self.assertRaises(ValueError):
        self.driver.create_node(name='test2',
                                image=os_image,
                                auth=password,
                                ex_description='test2 node',
                                ex_network_domain='fakenetworkdomain',
                                ex_primary_nic_private_ipv4='10.0.0.1',
                                ex_additional_nics=extra_nics,
                                ex_is_started=False)
def test_create_node_bad_additional_nics_vlan(self):
    """A non-list ex_additional_nics_vlan argument raises TypeError."""
    root_pw = NodeAuthPassword('pass123')  # snake_case per PEP 8, matches sibling tests
    image = self.driver.list_images()[0]
    with self.assertRaises(TypeError):
        self.driver.create_node(name='test2',
                                image=image,
                                auth=root_pw,
                                ex_description='test2 node',
                                ex_network_domain='fake_network_domain',
                                ex_vlan='fake_vlan',
                                ex_additional_nics_vlan='badstring',
                                ex_is_started=False)
def test_create_node_mcp2_indicate_dns(self):
    """Primary and secondary DNS servers can be supplied at deploy time."""
    root_pw = NodeAuthPassword('pass123')  # snake_case per PEP 8, matches sibling tests
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=root_pw,
                                   ex_description='test node dns',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_primary_ipv4='10.0.0.1',
                                   ex_primary_dns='8.8.8.8',
                                   ex_secondary_dns='8.8.4.4',
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_ex_shutdown_graceful(self):
    """Graceful shutdown of a node succeeds."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_shutdown_graceful(node)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_shutdown_graceful_INPROGRESS(self):
    """A shutdown that is already in progress raises an API exception."""
    DimensionDataMockHttp.type = 'INPROGRESS'
    pending = Node(id='11', name=None, state=None,
                   public_ips=None, private_ips=None, driver=self.driver)
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_shutdown_graceful(pending)
def test_ex_start_node(self):
    """Starting a node succeeds."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_start_node(node)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_start_node_INPROGRESS(self):
    """Starting a node with an operation in progress raises an API exception."""
    DimensionDataMockHttp.type = 'INPROGRESS'
    pending = Node(id='11', name=None, state=None,
                   public_ips=None, private_ips=None, driver=self.driver)
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_start_node(pending)
def test_ex_power_off(self):
    """Powering off a node succeeds."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_power_off(node)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_update_vm_tools(self):
    """Triggering a VM tools update succeeds."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_update_vm_tools(node)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_power_off_INPROGRESS(self):
    """Powering off a node that is already stopping raises an API exception."""
    DimensionDataMockHttp.type = 'INPROGRESS'
    stopping = Node(id='11', name=None, state='STOPPING',
                    public_ips=None, private_ips=None, driver=self.driver)
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_power_off(stopping)
def test_ex_reset(self):
    """Resetting a node succeeds."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_reset(node)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_attach_node_to_vlan(self):
    """A node can be attached to an existing VLAN."""
    node = self.driver.ex_get_node_by_id('e75ead52-692f-4314-8725-c8a4f4d13a87')
    vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
    ret = self.driver.ex_attach_node_to_vlan(node, vlan)
    # assertIs gives a clearer failure message than assertTrue(ret is True)
    self.assertIs(ret, True)
def test_ex_destroy_nic(self):
    """Destroying a NIC by id reports success."""
    self.assertTrue(
        self.driver.ex_destroy_nic('a202e51b-41c0-4cfc-add0-b1c62fc0ecf6'))
def test_list_networks(self):
    """list_networks returns named networks carrying NodeLocation objects."""
    nets = self.driver.list_networks()
    self.assertEqual(nets[0].name, 'test-net1')
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(nets[0].location, NodeLocation)
def test_ex_create_network(self):
    """Creating a network with a description returns the new network."""
    loc = self.driver.ex_get_location_by_id('NA9')
    created = self.driver.ex_create_network(loc, "Test Network", "test")
    self.assertEqual(created.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
    self.assertEqual(created.name, "Test Network")
def test_ex_create_network_NO_DESCRIPTION(self):
    """The description argument is optional when creating a network."""
    loc = self.driver.ex_get_location_by_id('NA9')
    created = self.driver.ex_create_network(loc, "Test Network")
    self.assertEqual(created.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
    self.assertEqual(created.name, "Test Network")
def test_ex_delete_network(self):
    """Deleting an existing network reports success."""
    target = self.driver.ex_list_networks()[0]
    self.assertTrue(self.driver.ex_delete_network(target))
def test_ex_rename_network(self):
    """Renaming an existing network reports success."""
    target = self.driver.ex_list_networks()[0]
    self.assertTrue(self.driver.ex_rename_network(target, "barry"))
def test_ex_create_network_domain(self):
    """Creating an ADVANCED network domain returns the new domain."""
    location = self.driver.ex_get_location_by_id('NA9')
    plan = NetworkDomainServicePlan.ADVANCED
    net = self.driver.ex_create_network_domain(location=location,
                                               name='test',
                                               description='test',
                                               service_plan=plan)
    self.assertEqual(net.name, 'test')
    # was assertTrue(net.id, '...'): assertTrue treats the second argument
    # as a failure message, so the id was never actually compared.
    self.assertEqual(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa')
def test_ex_create_network_domain_NO_DESCRIPTION(self):
    """The description argument is optional when creating a network domain."""
    location = self.driver.ex_get_location_by_id('NA9')
    plan = NetworkDomainServicePlan.ADVANCED
    net = self.driver.ex_create_network_domain(location=location,
                                               name='test',
                                               service_plan=plan)
    self.assertEqual(net.name, 'test')
    # was assertTrue(net.id, '...'): assertTrue treats the second argument
    # as a failure message, so the id was never actually compared.
    self.assertEqual(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa')
def test_ex_get_network_domain(self):
    """A network domain can be fetched by id with all fields populated."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    self.assertEqual(domain.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
    self.assertEqual(domain.description, 'test2')
    self.assertEqual(domain.name, 'test')
def test_ex_update_network_domain(self):
    """Renaming a network domain is reflected in the returned object."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    domain.name = 'new name'
    updated = self.driver.ex_update_network_domain(domain)
    self.assertEqual(updated.name, 'new name')
def test_ex_delete_network_domain(self):
    """Deleting a network domain reports success."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    self.assertTrue(self.driver.ex_delete_network_domain(domain))
def test_ex_list_networks(self):
    """ex_list_networks returns named networks with NodeLocation objects."""
    nets = self.driver.ex_list_networks()
    self.assertEqual(nets[0].name, 'test-net1')
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(nets[0].location, NodeLocation)
def test_ex_list_network_domains(self):
    """ex_list_network_domains returns domains with NodeLocation objects."""
    nets = self.driver.ex_list_network_domains()
    self.assertEqual(nets[0].name, 'Aurora')
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(nets[0].location, NodeLocation)
def test_ex_list_network_domains_ALLFILTERS(self):
    """All filter keywords are accepted when listing network domains."""
    DimensionDataMockHttp.type = 'ALLFILTERS'
    nets = self.driver.ex_list_network_domains(location='fake_location', name='fake_name',
                                               service_plan='fake_plan', state='fake_state')
    self.assertEqual(nets[0].name, 'Aurora')
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(nets[0].location, NodeLocation)
def test_ex_list_vlans(self):
    """ex_list_vlans returns the known VLANs."""
    listed = self.driver.ex_list_vlans()
    self.assertEqual(listed[0].name, "Primary")
def test_ex_list_vlans_ALLFILTERS(self):
    """All filter keywords are accepted when listing VLANs."""
    DimensionDataMockHttp.type = 'ALLFILTERS'
    listed = self.driver.ex_list_vlans(location='fake_location',
                                       network_domain='fake_network_domain',
                                       name='fake_name',
                                       ipv4_address='fake_ipv4',
                                       ipv6_address='fake_ipv6',
                                       state='fake_state')
    self.assertEqual(listed[0].name, "Primary")
def test_ex_create_vlan(self):
    """Creating a VLAN with a description returns the new VLAN."""
    # dropped the stray trailing comma from the original `(self,)` signature
    net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    vlan = self.driver.ex_create_vlan(network_domain=net,
                                      name='test',
                                      private_ipv4_base_address='10.3.4.0',
                                      private_ipv4_prefix_size='24',
                                      description='test vlan')
    self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
def test_ex_create_vlan_NO_DESCRIPTION(self):
    """The description argument is optional when creating a VLAN."""
    # dropped the stray trailing comma from the original `(self,)` signature
    net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    vlan = self.driver.ex_create_vlan(network_domain=net,
                                      name='test',
                                      private_ipv4_base_address='10.3.4.0',
                                      private_ipv4_prefix_size='24')
    self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
def test_ex_get_vlan(self):
    """ex_get_vlan returns a fully populated VLAN object."""
    vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
    expected = {
        'id': '0e56433f-d808-4669-821d-812769517ff8',
        'description': 'test2',
        'status': 'NORMAL',
        'name': 'Production VLAN',
        'private_ipv4_range_address': '10.0.3.0',
        'private_ipv4_range_size': 24,
        'ipv6_range_size': 64,
        'ipv6_range_address': '2607:f480:1111:1153:0:0:0:0',
        'ipv4_gateway': '10.0.3.1',
        'ipv6_gateway': '2607:f480:1111:1153:0:0:0:1',
    }
    for attr, value in expected.items():
        self.assertEqual(getattr(vlan, attr), value)
def test_ex_wait_for_state(self):
    """Waiting for a VLAN to reach NORMAL returns without raising."""
    self.driver.ex_wait_for_state(
        'NORMAL', self.driver.ex_get_vlan,
        vlan_id='0e56433f-d808-4669-821d-812769517ff8')
def test_ex_wait_for_state_NODE(self):
    """Waiting for a node to reach 'running' returns without raising."""
    self.driver.ex_wait_for_state(
        'running', self.driver.ex_get_node_by_id,
        id='e75ead52-692f-4314-8725-c8a4f4d13a87')
def test_ex_wait_for_state_FAIL(self):
    """Waiting for a state the node never reaches times out with an error."""
    with self.assertRaises(DimensionDataAPIException) as context:
        self.driver.ex_wait_for_state('starting',
                                      self.driver.ex_get_node_by_id,
                                      id='e75ead52-692f-4314-8725-c8a4f4d13a87',
                                      timeout=2)
    self.assertEqual(context.exception.code, 'running')
    # assertIn gives a clearer failure message than assertTrue(x in y)
    self.assertIn('timed out', context.exception.msg)
def test_ex_update_vlan(self):
    """Renaming a VLAN is reflected in the returned object."""
    vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
    vlan.name = 'new name'
    updated = self.driver.ex_update_vlan(vlan)
    self.assertEqual(updated.name, 'new name')
def test_ex_delete_vlan(self):
    """Deleting a VLAN reports success."""
    vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
    self.assertTrue(self.driver.ex_delete_vlan(vlan))
def test_ex_expand_vlan(self):
    """Expanding a VLAN updates its private IPv4 range size."""
    vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
    vlan.private_ipv4_range_size = '23'
    expanded = self.driver.ex_expand_vlan(vlan)
    self.assertEqual(expanded.private_ipv4_range_size, '23')
def test_ex_add_public_ip_block_to_network_domain(self):
    """Adding a public IP block to a domain returns the new block."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    block = self.driver.ex_add_public_ip_block_to_network_domain(domain)
    self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
def test_ex_list_public_ip_blocks(self):
    """Listing public IP blocks returns fully populated entries."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    first = self.driver.ex_list_public_ip_blocks(domain)[0]
    self.assertEqual(first.base_ip, '168.128.4.18')
    self.assertEqual(first.size, '2')
    self.assertEqual(first.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
    self.assertEqual(first.location.id, 'NA9')
    self.assertEqual(first.network_domain.id, domain.id)
def test_ex_get_public_ip_block(self):
    """A public IP block can be fetched by id."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
    self.assertEqual(block.base_ip, '168.128.4.18')
    self.assertEqual(block.size, '2')
    self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
    self.assertEqual(block.location.id, 'NA9')
    self.assertEqual(block.network_domain.id, domain.id)
def test_ex_delete_public_ip_block(self):
    """Deleting a public IP block reports success."""
    block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
    self.assertTrue(self.driver.ex_delete_public_ip_block(block))
def test_ex_list_firewall_rules(self):
    """The first listed firewall rule is returned fully populated."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    first = self.driver.ex_list_firewall_rules(domain)[0]
    self.assertEqual(first.id, '756cba02-b0bc-48f4-aea5-9445870b6148')
    self.assertEqual(first.network_domain.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
    self.assertEqual(first.name, 'CCDEFAULT.BlockOutboundMailIPv4')
    self.assertEqual(first.action, 'DROP')
    self.assertEqual(first.ip_version, 'IPV4')
    self.assertEqual(first.protocol, 'TCP')
    self.assertEqual(first.source.ip_address, 'ANY')
    self.assertTrue(first.source.any_ip)
    self.assertTrue(first.destination.any_ip)
def test_ex_create_firewall_rule(self):
    """An existing rule can be cloned into FIRST position."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    template = self.driver.ex_list_firewall_rules(domain)[0]
    created = self.driver.ex_create_firewall_rule(domain, template, 'FIRST')
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_with_specific_source_ip(self):
    """The SpecificSourceIP rule can be recreated in FIRST position."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    template = next(r for r in self.driver.ex_list_firewall_rules(domain)
                    if r.name == 'SpecificSourceIP')
    created = self.driver.ex_create_firewall_rule(domain, template, 'FIRST')
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_with_source_ip(self):
    """A specific source address and prefix are honoured on creation."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    template = next(r for r in self.driver.ex_list_firewall_rules(domain)
                    if r.name == 'SpecificSourceIP')
    template.source.any_ip = False
    template.source.ip_address = '10.0.0.1'
    template.source.ip_prefix_size = '15'
    created = self.driver.ex_create_firewall_rule(domain, template, 'FIRST')
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_with_any_ip(self):
    """An any-IP source is honoured on creation."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    template = next(r for r in self.driver.ex_list_firewall_rules(domain)
                    if r.name == 'SpecificSourceIP')
    template.source.any_ip = True
    created = self.driver.ex_create_firewall_rule(domain, template, 'FIRST')
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_ip_prefix_size(self):
    """Explicit source/destination address prefixes are accepted."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    template = self.driver.ex_list_firewall_rules(domain)[0]
    template.source.address_list_id = None
    template.source.any_ip = False
    template.source.ip_address = '10.2.1.1'
    template.source.ip_prefix_size = '10'
    template.destination.address_list_id = None
    template.destination.any_ip = False
    template.destination.ip_address = '10.0.0.1'
    template.destination.ip_prefix_size = '20'
    self.driver.ex_create_firewall_rule(domain, template, 'LAST')
def test_ex_create_firewall_rule_address_list(self):
    """Address-list ids are accepted for both endpoints."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    template = self.driver.ex_list_firewall_rules(domain)[0]
    template.source.address_list_id = '12345'
    template.destination.address_list_id = '12345'
    self.driver.ex_create_firewall_rule(domain, template, 'LAST')
def test_ex_create_firewall_rule_port_list(self):
    """Port-list ids are accepted for both endpoints."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    template = self.driver.ex_list_firewall_rules(domain)[0]
    template.source.port_list_id = '12345'
    template.destination.port_list_id = '12345'
    self.driver.ex_create_firewall_rule(domain, template, 'LAST')
def test_ex_create_firewall_rule_port(self):
    """Explicit port ranges are accepted for both endpoints."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    template = self.driver.ex_list_firewall_rules(domain)[0]
    template.source.port_list_id = None
    template.source.port_begin = '8000'
    template.source.port_end = '8005'
    template.destination.port_list_id = None
    template.destination.port_begin = '7000'
    template.destination.port_end = '7005'
    self.driver.ex_create_firewall_rule(domain, template, 'LAST')
def test_ex_create_firewall_rule_ALL_VALUES(self):
    """Every listed rule can be recreated without error."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    for template in self.driver.ex_list_firewall_rules(domain):
        self.driver.ex_create_firewall_rule(domain, template, 'LAST')
def test_ex_create_firewall_rule_WITH_POSITION_RULE(self):
    """A rule can be positioned BEFORE another rule object."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    listed = self.driver.ex_list_firewall_rules(domain)
    created = self.driver.ex_create_firewall_rule(
        domain, listed[-2], 'BEFORE', listed[-1])
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_WITH_POSITION_RULE_STR(self):
    """A rule can be positioned BEFORE another rule given by name."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    listed = self.driver.ex_list_firewall_rules(domain)
    created = self.driver.ex_create_firewall_rule(
        domain, listed[-2], 'BEFORE', 'RULE_WITH_SOURCE_AND_DEST')
    self.assertEqual(created.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_create_firewall_rule_FAIL_POSITION(self):
    """A relative position without a relative rule raises ValueError."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    listed = self.driver.ex_list_firewall_rules(domain)
    with self.assertRaises(ValueError):
        self.driver.ex_create_firewall_rule(domain, listed[0], 'BEFORE')
def test_ex_create_firewall_rule_FAIL_POSITION_WITH_RULE(self):
    """An absolute position combined with a relative rule raises ValueError."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    listed = self.driver.ex_list_firewall_rules(domain)
    with self.assertRaises(ValueError):
        self.driver.ex_create_firewall_rule(domain, listed[0], 'LAST',
                                            'RULE_WITH_SOURCE_AND_DEST')
def test_ex_get_firewall_rule(self):
    """A firewall rule can be fetched by id."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    fetched = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    self.assertEqual(fetched.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
def test_ex_set_firewall_rule_state(self):
    """Disabling a firewall rule reports success."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    fetched = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    self.assertTrue(self.driver.ex_set_firewall_rule_state(fetched, False))
def test_ex_delete_firewall_rule(self):
    """Deleting a firewall rule reports success."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    fetched = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    self.assertTrue(self.driver.ex_delete_firewall_rule(fetched))
def test_ex_edit_firewall_rule(self):
    """Editing a rule with an absolute position succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.source.any_ip = True
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_source_ipaddresslist(self):
    """An edit carrying a source address-list id succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.source.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
    target.source.any_ip = False
    target.source.ip_address = '10.0.0.1'
    target.source.ip_prefix_size = 10
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_destination_ipaddresslist(self):
    """An edit carrying a destination address-list id succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.destination.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
    target.destination.any_ip = False
    target.destination.ip_address = '10.0.0.1'
    target.destination.ip_prefix_size = 10
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_destination_ipaddress(self):
    """Edit a rule using an explicit IP address instead of an address list.

    NOTE(review): despite the name, this test mutates ``rule.source``
    while the sibling *_source_ipaddress test mutates ``rule.destination``
    — the two names appear swapped; confirm before renaming.
    """
    net = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    rule = self.driver.ex_get_firewall_rule(
        net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    rule.source.address_list_id = None
    rule.source.any_ip = False
    rule.source.ip_address = '10.0.0.1'
    rule.source.ip_prefix_size = '10'
    result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
    self.assertTrue(result)
def test_ex_edit_firewall_rule_source_ipaddress(self):
    """Edit a rule using an explicit IP address instead of an address list.

    NOTE(review): despite the name, this test mutates ``rule.destination``
    while the sibling *_destination_ipaddress test mutates ``rule.source``
    — the two names appear swapped; confirm before renaming.
    """
    net = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    rule = self.driver.ex_get_firewall_rule(
        net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    rule.destination.address_list_id = None
    rule.destination.any_ip = False
    rule.destination.ip_address = '10.0.0.1'
    rule.destination.ip_prefix_size = '10'
    result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
    self.assertTrue(result)
def test_ex_edit_firewall_rule_with_relative_rule(self):
    """An edit positioned BEFORE another rule object succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    anchor = self.driver.ex_list_firewall_rules(network_domain=domain)[-1]
    self.assertTrue(self.driver.ex_edit_firewall_rule(
        rule=target, position='BEFORE',
        relative_rule_for_position=anchor))
def test_ex_edit_firewall_rule_with_relative_rule_by_name(self):
    """An edit positioned BEFORE another rule given by name succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    anchor = self.driver.ex_list_firewall_rules(network_domain=domain)[-1]
    self.assertTrue(self.driver.ex_edit_firewall_rule(
        rule=target, position='BEFORE',
        relative_rule_for_position=anchor.name))
def test_ex_edit_firewall_rule_source_portlist(self):
    """An edit carrying a source port-list id succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.source.port_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_source_port(self):
    """An edit carrying an explicit source port range succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.source.port_list_id = None
    target.source.port_begin = '3'
    target.source.port_end = '10'
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_destination_portlist(self):
    """An edit carrying a destination port-list id succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.destination.port_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_destination_port(self):
    """An edit carrying an explicit destination port range succeeds."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    target.destination.port_list_id = None
    target.destination.port_begin = '3'
    target.destination.port_end = '10'
    self.assertTrue(
        self.driver.ex_edit_firewall_rule(rule=target, position='LAST'))
def test_ex_edit_firewall_rule_invalid_position_fail(self):
    """Editing with a relative position but no relative rule raises ValueError."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    with self.assertRaises(ValueError):
        self.driver.ex_edit_firewall_rule(rule=target, position='BEFORE')
def test_ex_edit_firewall_rule_invalid_position_relative_rule_fail(self):
    """Editing with an absolute position plus a relative rule raises ValueError."""
    domain = self.driver.ex_get_network_domain(
        '8cdfd607-f429-4df6-9352-162cfc0891be')
    target = self.driver.ex_get_firewall_rule(
        domain, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    anchor = self.driver.ex_list_firewall_rules(network_domain=domain)[-1]
    with self.assertRaises(ValueError):
        self.driver.ex_edit_firewall_rule(rule=target, position='FIRST',
                                          relative_rule_for_position=anchor)
def test_ex_create_nat_rule(self):
    """Creating a NAT rule returns the new rule."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    created = self.driver.ex_create_nat_rule(domain, '1.2.3.4', '4.3.2.1')
    self.assertEqual(created.id, 'd31c2db0-be6b-4d50-8744-9a7a534b5fba')
def test_ex_list_nat_rules(self):
    """The first listed NAT rule is fully populated."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    first = self.driver.ex_list_nat_rules(domain)[0]
    self.assertEqual(first.id, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
    self.assertEqual(first.internal_ip, '10.0.0.15')
    self.assertEqual(first.external_ip, '165.180.12.18')
def test_ex_get_nat_rule(self):
    """A NAT rule can be fetched by id."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    fetched = self.driver.ex_get_nat_rule(domain, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
    self.assertEqual(fetched.id, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
    self.assertEqual(fetched.internal_ip, '10.0.0.16')
    self.assertEqual(fetched.external_ip, '165.180.12.19')
def test_ex_delete_nat_rule(self):
    """Deleting a NAT rule reports success."""
    domain = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
    fetched = self.driver.ex_get_nat_rule(domain, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
    self.assertTrue(self.driver.ex_delete_nat_rule(fetched))
def test_ex_enable_monitoring(self):
    """Enabling ADVANCED monitoring on a node reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_enable_monitoring(target, "ADVANCED"))
def test_ex_disable_monitoring(self):
    """Disabling monitoring on a node reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_disable_monitoring(target))
def test_ex_change_monitoring_plan(self):
    """Switching a node's monitoring plan reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_update_monitoring_plan(target, "ESSENTIALS"))
def test_ex_add_storage_to_node(self):
    """Adding a 30 GB PERFORMANCE disk to a node reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_add_storage_to_node(target, 30, 'PERFORMANCE'))
def test_ex_remove_storage_from_node(self):
    """Removing disk 0 from a node reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_remove_storage_from_node(target, 0))
def test_ex_change_storage_speed(self):
    """Changing disk 1 to PERFORMANCE speed reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_change_storage_speed(target, 1, 'PERFORMANCE'))
def test_ex_change_storage_size(self):
    """Resizing disk 1 to 100 GB reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(self.driver.ex_change_storage_size(target, 1, 100))
def test_ex_clone_node_to_image(self):
    """Cloning a node into a customer image reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(
        self.driver.ex_clone_node_to_image(target, 'my image', 'a description'))
def test_ex_update_node(self):
    """Updating name, description, CPU and RAM reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(
        self.driver.ex_update_node(target, 'my new name', 'a description', 2, 4048))
def test_ex_reconfigure_node(self):
    """Reconfiguring CPU count/performance of a node reports success."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(
        self.driver.ex_reconfigure_node(target, 4, 4, 1, 'HIGHPERFORMANCE'))
def test_ex_get_location_by_id(self):
    """Looking up location 'NA9' returns that location."""
    location = self.driver.ex_get_location_by_id('NA9')
    # was assertTrue(location.id, 'NA9'): assertTrue treats the second
    # argument as a failure message, so the id was never actually compared.
    self.assertEqual(location.id, 'NA9')
def test_ex_get_location_by_id_NO_LOCATION(self):
    """A None id yields no location."""
    self.assertIsNone(self.driver.ex_get_location_by_id(None))
def test_ex_get_base_image_by_id(self):
    """The first base image fetched by id reports a UNIX OS type."""
    first_id = self.driver.list_images()[0].id
    fetched = self.driver.ex_get_base_image_by_id(first_id)
    self.assertEqual(fetched.extra['OS_type'], 'UNIX')
def test_ex_get_customer_image_by_id(self):
    """The second customer image fetched by id reports a WINDOWS OS type."""
    second_id = self.driver.ex_list_customer_images()[1].id
    fetched = self.driver.ex_get_customer_image_by_id(second_id)
    self.assertEqual(fetched.extra['OS_type'], 'WINDOWS')
def test_ex_get_image_by_id_base_img(self):
    """The second base image is a WINDOWS image."""
    second_id = self.driver.list_images()[1].id
    fetched = self.driver.ex_get_base_image_by_id(second_id)
    self.assertEqual(fetched.extra['OS_type'], 'WINDOWS')
def test_ex_get_image_by_id_customer_img(self):
    """The first customer image is a UNIX image."""
    first_id = self.driver.ex_list_customer_images()[0].id
    fetched = self.driver.ex_get_customer_image_by_id(first_id)
    self.assertEqual(fetched.extra['OS_type'], 'UNIX')
def test_ex_get_image_by_id_customer_FAIL(self):
    """An unknown image id raises an API exception."""
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_get_base_image_by_id('FAKE_IMAGE_ID')
def test_ex_create_anti_affinity_rule(self):
    """A rule can be created from a list of two nodes."""
    nodes = self.driver.list_nodes()
    self.assertTrue(
        self.driver.ex_create_anti_affinity_rule([nodes[0], nodes[1]]))
def test_ex_create_anti_affinity_rule_TUPLE(self):
    """A rule can be created from a tuple of two nodes."""
    nodes = self.driver.list_nodes()
    self.assertTrue(
        self.driver.ex_create_anti_affinity_rule((nodes[0], nodes[1])))
def test_ex_create_anti_affinity_rule_TUPLE_STR(self):
    """A rule can be created from a tuple of two node id strings."""
    nodes = self.driver.list_nodes()
    self.assertTrue(
        self.driver.ex_create_anti_affinity_rule((nodes[0].id, nodes[1].id)))
def test_ex_create_anti_affinity_rule_FAIL_STR(self):
    """A bare string is not a valid node collection."""
    with self.assertRaises(TypeError):
        self.driver.ex_create_anti_affinity_rule('string')
def test_ex_create_anti_affinity_rule_FAIL_EXISTING(self):
    """Creating a duplicate rule raises an API exception."""
    nodes = self.driver.list_nodes()
    DimensionDataMockHttp.type = 'FAIL_EXISTING'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_create_anti_affinity_rule((nodes[0], nodes[1]))
def test_ex_delete_anti_affinity_rule(self):
    """A rule object can be deleted."""
    domain = self.driver.ex_list_network_domains()[0]
    rule = self.driver.ex_list_anti_affinity_rules(network_domain=domain)[0]
    self.assertTrue(self.driver.ex_delete_anti_affinity_rule(rule))
def test_ex_delete_anti_affinity_rule_STR(self):
    """A rule can be deleted by its id string."""
    domain = self.driver.ex_list_network_domains()[0]
    rule = self.driver.ex_list_anti_affinity_rules(network_domain=domain)[0]
    self.assertTrue(self.driver.ex_delete_anti_affinity_rule(rule.id))
def test_ex_delete_anti_affinity_rule_FAIL(self):
    """A failed delete raises an API exception."""
    domain = self.driver.ex_list_network_domains()[0]
    rule = self.driver.ex_list_anti_affinity_rules(network_domain=domain)[0]
    DimensionDataMockHttp.type = 'FAIL'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_delete_anti_affinity_rule(rule)
def test_ex_list_anti_affinity_rules_NETWORK_DOMAIN(self):
    """Anti-affinity rules can be listed by network domain."""
    net_domain = self.driver.ex_list_network_domains()[0]
    rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(rules, list)
    self.assertEqual(len(rules), 2)
    self.assertIsInstance(rules[0].id, str)
    self.assertIsInstance(rules[0].node_list, list)
def test_ex_list_anti_affinity_rules_NETWORK(self):
    """Anti-affinity rules can be listed by network."""
    network = self.driver.list_networks()[0]
    rules = self.driver.ex_list_anti_affinity_rules(network=network)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(rules, list)
    self.assertEqual(len(rules), 2)
    self.assertIsInstance(rules[0].id, str)
    self.assertIsInstance(rules[0].node_list, list)
def test_ex_list_anti_affinity_rules_NODE(self):
    """Anti-affinity rules can be listed by node."""
    node = self.driver.list_nodes()[0]
    rules = self.driver.ex_list_anti_affinity_rules(node=node)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(rules, list)
    self.assertEqual(len(rules), 2)
    self.assertIsInstance(rules[0].id, str)
    self.assertIsInstance(rules[0].node_list, list)
def test_ex_list_anti_affinity_rules_PAGINATED(self):
    """Paginated responses are merged into a single rule list."""
    net_domain = self.driver.ex_list_network_domains()[0]
    DimensionDataMockHttp.type = 'PAGINATED'
    rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(rules, list)
    self.assertEqual(len(rules), 4)
    self.assertIsInstance(rules[0].id, str)
    self.assertIsInstance(rules[0].node_list, list)
def test_ex_list_anti_affinity_rules_ALLFILTERS(self):
    """All filter keywords are accepted when listing anti-affinity rules."""
    net_domain = self.driver.ex_list_network_domains()[0]
    DimensionDataMockHttp.type = 'ALLFILTERS'
    rules = self.driver.ex_list_anti_affinity_rules(
        network_domain=net_domain, filter_id='FAKE_ID',
        filter_state='FAKE_STATE')
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...))
    self.assertIsInstance(rules, list)
    self.assertEqual(len(rules), 2)
    self.assertIsInstance(rules[0].id, str)
    self.assertIsInstance(rules[0].node_list, list)
def test_ex_list_anti_affinity_rules_BAD_ARGS(self):
    """Passing both network and network_domain is rejected."""
    with self.assertRaises(ValueError):
        self.driver.ex_list_anti_affinity_rules(
            network='fake_network', network_domain='fake_network_domain')
def test_ex_create_tag_key(self):
    """A tag key can be created with default options."""
    self.assertTrue(self.driver.ex_create_tag_key('MyTestKey'))
def test_ex_create_tag_key_ALLPARAMS(self):
    """All optional tag-key parameters are serialised in the request."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'ALLPARAMS'
    created = self.driver.ex_create_tag_key(
        'MyTestKey', description="Test Key Desc.",
        value_required=False, display_on_report=False)
    self.assertTrue(created)
def test_ex_create_tag_key_BADREQUEST(self):
    """A BAD_REQUEST response surfaces as DimensionDataAPIException."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'BADREQUEST'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_create_tag_key('MyTestKey')
def test_ex_list_tag_keys(self):
    """Tag keys are returned as DimensionDataTagKey objects."""
    keys = self.driver.ex_list_tag_keys()
    self.assertIsInstance(keys, list)
    self.assertIsInstance(keys[0], DimensionDataTagKey)
    self.assertIsInstance(keys[0].id, str)
def test_ex_list_tag_keys_ALLFILTERS(self):
    """All tag-key filters are accepted (mock asserts the parameters)."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'ALLFILTERS'
    self.driver.ex_list_tag_keys(
        id='fake_id', name='fake_name',
        value_required=False, display_on_report=False)
def test_ex_get_tag_by_id(self):
    """A single tag key can be fetched by its UUID."""
    result = self.driver.ex_get_tag_key_by_id(
        'd047c609-93d7-4bc5-8fc9-732c85840075')
    self.assertIsInstance(result, DimensionDataTagKey)
def test_ex_get_tag_by_id_NOEXIST(self):
    """Fetching a missing tag key raises DimensionDataAPIException."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'NOEXIST'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_get_tag_key_by_id(
            'd047c609-93d7-4bc5-8fc9-732c85840075')
def test_ex_get_tag_by_name(self):
    """A tag key can be looked up by name when exactly one matches."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'SINGLE'
    result = self.driver.ex_get_tag_key_by_name('LibcloudTest')
    self.assertIsInstance(result, DimensionDataTagKey)
def test_ex_get_tag_by_name_NOEXIST(self):
    """Looking up a name that does not match exactly one key raises."""
    with self.assertRaises(ValueError):
        self.driver.ex_get_tag_key_by_name('LibcloudTest')
def test_ex_modify_tag_key_NAME(self):
    """A tag key's name can be changed."""
    key = self.driver.ex_list_tag_keys()[0]
    DimensionDataMockHttp.type = 'NAME'
    self.assertTrue(self.driver.ex_modify_tag_key(key, name='NewName'))
def test_ex_modify_tag_key_NOTNAME(self):
    """Non-name attributes of a tag key can be changed together."""
    key = self.driver.ex_list_tag_keys()[0]
    DimensionDataMockHttp.type = 'NOTNAME'
    modified = self.driver.ex_modify_tag_key(
        key, description='NewDesc', value_required=False,
        display_on_report=True)
    self.assertTrue(modified)
def test_ex_modify_tag_key_NOCHANGE(self):
    """Modifying a tag key with no changes raises an API exception."""
    key = self.driver.ex_list_tag_keys()[0]
    DimensionDataMockHttp.type = 'NOCHANGE'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_modify_tag_key(key)
def test_ex_remove_tag_key(self):
    """An existing tag key can be removed."""
    key = self.driver.ex_list_tag_keys()[0]
    self.assertTrue(self.driver.ex_remove_tag_key(key))
def test_ex_remove_tag_key_NOEXIST(self):
    """Removing a non-existent tag key raises an API exception."""
    key = self.driver.ex_list_tag_keys()[0]
    DimensionDataMockHttp.type = 'NOEXIST'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_remove_tag_key(key)
def test_ex_apply_tag_to_asset(self):
    """A tag with a value can be applied to a node."""
    target = self.driver.list_nodes()[0]
    applied = self.driver.ex_apply_tag_to_asset(
        target, 'TagKeyName', 'FakeValue')
    self.assertTrue(applied)
def test_ex_apply_tag_to_asset_NOVALUE(self):
    """A tag can be applied without a value."""
    target = self.driver.list_nodes()[0]
    DimensionDataMockHttp.type = 'NOVALUE'
    self.assertTrue(self.driver.ex_apply_tag_to_asset(target, 'TagKeyName'))
def test_ex_apply_tag_to_asset_NOTAGKEY(self):
    """Applying an unknown tag key raises an API exception."""
    target = self.driver.list_nodes()[0]
    DimensionDataMockHttp.type = 'NOTAGKEY'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_apply_tag_to_asset(target, 'TagKeyNam')
def test_ex_apply_tag_to_asset_BADASSETTYPE(self):
    """Tagging an unsupported asset type raises TypeError."""
    bad_asset = self.driver.list_networks()[0]
    DimensionDataMockHttp.type = 'NOTAGKEY'
    with self.assertRaises(TypeError):
        self.driver.ex_apply_tag_to_asset(bad_asset, 'TagKeyNam')
def test_ex_remove_tag_from_asset(self):
    """An applied tag can be removed from a node."""
    target = self.driver.list_nodes()[0]
    self.assertTrue(
        self.driver.ex_remove_tag_from_asset(target, 'TagKeyName'))
def test_ex_remove_tag_from_asset_NOTAG(self):
    """Removing a tag that is not applied raises an API exception."""
    target = self.driver.list_nodes()[0]
    DimensionDataMockHttp.type = 'NOTAG'
    with self.assertRaises(DimensionDataAPIException):
        self.driver.ex_remove_tag_from_asset(target, 'TagKeyNam')
def test_ex_list_tags(self):
    """All tags are returned as DimensionDataTag objects."""
    tags = self.driver.ex_list_tags()
    self.assertIsInstance(tags, list)
    self.assertIsInstance(tags[0], DimensionDataTag)
    self.assertEqual(len(tags), 3)
def test_ex_list_tags_ALLPARAMS(self):
    """Every supported tag filter is forwarded to the API."""
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'ALLPARAMS'
    tags = self.driver.ex_list_tags(
        asset_id='fake_asset_id', asset_type='fake_asset_type',
        location='fake_location', tag_key_name='fake_tag_key_name',
        tag_key_id='fake_tag_key_id', value='fake_value',
        value_required=False, display_on_report=False)
    self.assertIsInstance(tags, list)
    self.assertIsInstance(tags[0], DimensionDataTag)
    self.assertEqual(len(tags), 3)
def test_priv_location_to_location_id(self):
    """A NodeLocation object resolves to its own id."""
    location = self.driver.ex_get_location_by_id('NA9')
    self.assertEqual(
        self.driver._location_to_location_id(location), 'NA9')
def test_priv_location_to_location_id_STR(self):
    """A plain string location id passes through unchanged."""
    self.assertEqual(
        self.driver._location_to_location_id('NA9'), 'NA9')
def test_priv_location_to_location_id_TYPEERROR(self):
    """Unsupported location argument types raise TypeError."""
    with self.assertRaises(TypeError):
        self.driver._location_to_location_id([1, 2, 3])
def test_priv_image_needs_auth_os_img(self):
    """An OS image (object form) requires guest authentication."""
    os_image = self.driver.list_images()[1]
    self.assertTrue(self.driver._image_needs_auth(os_image))
def test_priv_image_needs_auth_os_img_STR(self):
    """An OS image referenced by id string requires authentication."""
    image_id = self.driver.list_images()[1].id
    self.assertTrue(self.driver._image_needs_auth(image_id))
def test_priv_image_needs_auth_cust_img_windows(self):
    """A Windows customer image requires guest authentication."""
    win_image = self.driver.ex_list_customer_images()[1]
    self.assertTrue(self.driver._image_needs_auth(win_image))
def test_priv_image_needs_auth_cust_img_windows_STR(self):
    """A Windows customer image id string requires authentication."""
    win_image_id = self.driver.ex_list_customer_images()[1].id
    self.assertTrue(self.driver._image_needs_auth(win_image_id))
def test_priv_image_needs_auth_cust_img_linux(self):
    """A Linux customer image does not require guest authentication."""
    linux_image = self.driver.ex_list_customer_images()[0]
    self.assertFalse(self.driver._image_needs_auth(linux_image))
def test_priv_image_needs_auth_cust_img_linux_STR(self):
    """A Linux customer image id string does not require auth."""
    linux_image_id = self.driver.ex_list_customer_images()[0].id
    self.assertFalse(self.driver._image_needs_auth(linux_image_id))
def test_summary_usage_report(self):
    """Summary usage report parses into 13 rows of 6 columns."""
    report = self.driver.ex_summary_usage_report('2016-06-01', '2016-06-30')
    self.assertEqual(len(report), 13)
    self.assertEqual(len(report[0]), 6)
def test_detailed_usage_report(self):
    """Detailed usage report parses into 42 rows of 4 columns."""
    report = self.driver.ex_detailed_usage_report('2016-06-01', '2016-06-30')
    self.assertEqual(len(report), 42)
    self.assertEqual(len(report[0]), 4)
def test_audit_log_report(self):
    """Audit log report parses 25 rows; third row holds the system user."""
    report = self.driver.ex_audit_log_report('2016-06-01', '2016-06-30')
    self.assertEqual(len(report), 25)
    self.assertEqual(report[2][2], 'OEC_SYSTEM')
def test_ex_list_ip_address_list(self):
    """IP address lists are returned with fully populated fields."""
    domain = self.driver.ex_list_network_domains()[0]
    ip_lists = self.driver.ex_list_ip_address_list(
        ex_network_domain=domain)
    self.assertIsInstance(ip_lists, list)
    self.assertEqual(len(ip_lists), 4)
    first = ip_lists[0]
    for attr in ('name', 'description', 'ip_version', 'state',
                 'create_time'):
        self.assertIsInstance(getattr(first, attr), str)
    self.assertIsInstance(first.child_ip_address_lists, list)
    # The second list in the fixture has exactly one child list.
    self.assertEqual(len(ip_lists[1].child_ip_address_lists), 1)
    self.assertIsInstance(ip_lists[1].child_ip_address_lists[0].name, str)
def test_ex_get_ip_address_list(self):
    """A single IP address list can be fetched by name."""
    domain = self.driver.ex_list_network_domains()[0]
    DimensionDataMockHttp.type = 'FILTERBYNAME'
    matches = self.driver.ex_get_ip_address_list(
        ex_network_domain=domain.id,
        ex_ip_address_list_name='Test_IP_Address_List_3')
    self.assertIsInstance(matches, list)
    self.assertEqual(len(matches), 1)
    found = matches[0]
    for attr in ('name', 'description', 'ip_version', 'state',
                 'create_time'):
        self.assertIsInstance(getattr(found, attr), str)
    addresses = found.ip_address_collection
    self.assertEqual(len(addresses), 3)
    self.assertIsInstance(addresses[0].begin, str)
    self.assertIsInstance(addresses[0].prefix_size, str)
    self.assertIsInstance(addresses[2].end, str)
def test_ex_create_ip_address_list_FAIL(self):
    """Creating an IP address list without required args raises."""
    domain = self.driver.ex_list_network_domains()[0]
    with self.assertRaises(TypeError):
        self.driver.ex_create_ip_address_list(
            ex_network_domain=domain.id)
def test_ex_create_ip_address_list(self):
    """An IP address list is created from model objects."""
    domain = self.driver.ex_list_network_domains()[0]
    child_list = DimensionDataChildIpAddressList(
        id='0291ef78-4059-4bc1-b433-3f6ad698dc41',
        name="test_child_ip_addr_list")
    # One single address, one range, one CIDR-style entry.
    addresses = [
        DimensionDataIpAddress(begin='190.2.2.100'),
        DimensionDataIpAddress(begin='190.2.2.106', end='190.2.2.108'),
        DimensionDataIpAddress(begin='190.2.2.0', prefix_size='24'),
    ]
    created = self.driver.ex_create_ip_address_list(
        ex_network_domain=domain,
        name="Test_IP_Address_List_3",
        ip_version="IPV4",
        description="Test Description",
        ip_address_collection=addresses,
        child_ip_address_list=child_list)
    self.assertTrue(created)
def test_ex_create_ip_address_list_STR(self):
    """An IP address list is created from plain id strings."""
    domain = self.driver.ex_list_network_domains()[0]
    addresses = [
        DimensionDataIpAddress(begin='190.2.2.100'),
        DimensionDataIpAddress(begin='190.2.2.106', end='190.2.2.108'),
        DimensionDataIpAddress(begin='190.2.2.0', prefix_size='24'),
    ]
    created = self.driver.ex_create_ip_address_list(
        ex_network_domain=domain.id,
        name="Test_IP_Address_List_3",
        ip_version="IPV4",
        description="Test Description",
        ip_address_collection=addresses,
        child_ip_address_list='0291ef78-4059-4bc1-b433-3f6ad698dc41')
    self.assertTrue(created)
def test_ex_edit_ip_address_list(self):
    """Editing via a DimensionDataIpAddressList object succeeds."""
    addresses = [DimensionDataIpAddress(begin='190.2.2.111')]
    child_list = DimensionDataChildIpAddressList(
        id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
        name="test_child_ip_address_list edited")
    target = DimensionDataIpAddressList(
        id='1111ef78-4059-4bc1-b433-3f6ad698d111',
        name="test ip address list edited",
        ip_version="IPv4", description="test",
        ip_address_collection=addresses,
        child_ip_address_lists=child_list,
        state="NORMAL",
        create_time='2015-09-29T02:49:45')
    edited = self.driver.ex_edit_ip_address_list(
        ex_ip_address_list=target,
        description="test ip address list",
        ip_address_collection=addresses,
        child_ip_address_lists=child_list)
    self.assertTrue(edited)
def test_ex_edit_ip_address_list_STR(self):
    """Editing via a plain id string succeeds."""
    addresses = [DimensionDataIpAddress(begin='190.2.2.111')]
    child_list = DimensionDataChildIpAddressList(
        id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
        name="test_child_ip_address_list edited")
    # NOTE(review): the id below contains a stray space; the mock does
    # not appear to key on it, but confirm it is intentional.
    edited = self.driver.ex_edit_ip_address_list(
        ex_ip_address_list='84e34850-595d- 436e-a885-7cd37edb24a4',
        description="test ip address list",
        ip_address_collection=addresses,
        child_ip_address_lists=child_list)
    self.assertTrue(edited)
def test_ex_delete_ip_address_list(self):
    """Deleting via a DimensionDataIpAddressList object succeeds."""
    child_list = DimensionDataChildIpAddressList(
        id='2221ef78-4059-4bc1-b433-3f6ad698dc41',
        name="test_child_ip_address_list edited")
    target = DimensionDataIpAddressList(
        id='1111ef78-4059-4bc1-b433-3f6ad698d111',
        name="test ip address list edited",
        ip_version="IPv4", description="test",
        ip_address_collection=None,
        child_ip_address_lists=child_list,
        state="NORMAL",
        create_time='2015-09-29T02:49:45')
    self.assertTrue(self.driver.ex_delete_ip_address_list(
        ex_ip_address_list=target))
def test_ex_delete_ip_address_list_STR(self):
    """Deleting via a plain id string succeeds."""
    # NOTE(review): this id is one character shorter than the one used
    # elsewhere ('1111ef78...'); the mock does not key on it — verify.
    self.assertTrue(self.driver.ex_delete_ip_address_list(
        ex_ip_address_list='111ef78-4059-4bc1-b433-3f6ad698d111'))
def test_ex_list_portlist(self):
    """Port lists are returned with fully populated fields."""
    domain = self.driver.ex_list_network_domains()[0]
    port_lists = self.driver.ex_list_portlist(
        ex_network_domain=domain)
    self.assertIsInstance(port_lists, list)
    self.assertEqual(len(port_lists), 3)
    first = port_lists[0]
    for attr in ('name', 'description', 'state', 'create_time'):
        self.assertIsInstance(getattr(first, attr), str)
    self.assertIsInstance(first.port_collection, list)
    self.assertIsInstance(first.port_collection[0].begin, str)
    self.assertIsInstance(first.port_collection[0].end, str)
    self.assertIsInstance(first.child_portlist_list, list)
    self.assertIsInstance(first.child_portlist_list[0].id, str)
    self.assertIsInstance(first.child_portlist_list[0].name, str)
def test_ex_get_port_list(self):
    """A single port list can be fetched by id."""
    domain = self.driver.ex_list_network_domains()[0]
    first_id = self.driver.ex_list_portlist(
        ex_network_domain=domain)[0].id
    fetched = self.driver.ex_get_portlist(ex_portlist_id=first_id)
    self.assertIsInstance(fetched, DimensionDataPortList)
    for attr in ('name', 'description', 'state', 'create_time'):
        self.assertIsInstance(getattr(fetched, attr), str)
    self.assertIsInstance(fetched.port_collection, list)
    self.assertIsInstance(fetched.port_collection[0].begin, str)
    self.assertIsInstance(fetched.port_collection[0].end, str)
    self.assertIsInstance(fetched.child_portlist_list, list)
    self.assertIsInstance(fetched.child_portlist_list[0].id, str)
    self.assertIsInstance(fetched.child_portlist_list[0].name, str)
def test_ex_get_portlist_STR(self):
    """Fetching a port list by its id string returns a full object."""
    domain = self.driver.ex_list_network_domains()[0]
    listed = self.driver.ex_list_portlist(ex_network_domain=domain)[0]
    fetched = self.driver.ex_get_portlist(ex_portlist_id=listed.id)
    self.assertIsInstance(fetched, DimensionDataPortList)
    for attr in ('name', 'description', 'state', 'create_time'):
        self.assertIsInstance(getattr(fetched, attr), str)
    self.assertIsInstance(fetched.port_collection, list)
    self.assertIsInstance(fetched.port_collection[0].begin, str)
    self.assertIsInstance(fetched.port_collection[0].end, str)
    self.assertIsInstance(fetched.child_portlist_list, list)
    self.assertIsInstance(fetched.child_portlist_list[0].id, str)
    self.assertIsInstance(fetched.child_portlist_list[0].name, str)
def test_ex_create_portlist_NOCHILDPORTLIST(self):
    """A port list can be created without any child port lists.

    Fix: the second entry of the port collection was built as a
    ``DimensionDataIpAddress`` (copy/paste from the IP-address-list
    tests); a port range belongs in ``DimensionDataPort``.  Both types
    expose ``begin``/``end`` so behaviour is unchanged, but the model
    is now correct.
    """
    name = "Test_Port_List"
    description = "Test Description"
    net_domain = self.driver.ex_list_network_domains()[0]
    port_1 = DimensionDataPort(begin='8080')
    port_2 = DimensionDataPort(begin='8899', end='9023')
    port_collection = [port_1, port_2]
    # Create the port list
    success = self.driver.ex_create_portlist(
        ex_network_domain=net_domain, name=name,
        description=description,
        port_collection=port_collection
    )
    self.assertTrue(success)
def test_ex_create_portlist(self):
    """A port list can be created with child port lists.

    Fix: ``port_2`` was a ``DimensionDataIpAddress`` left over from a
    copy/paste of the IP-address-list tests; a port range should be a
    ``DimensionDataPort``.  The misleading ``Create IP Address List``
    comment is corrected too.
    """
    name = "Test_Port_List"
    description = "Test Description"
    net_domain = self.driver.ex_list_network_domains()[0]
    port_1 = DimensionDataPort(begin='8080')
    port_2 = DimensionDataPort(begin='8899', end='9023')
    port_collection = [port_1, port_2]
    child_port_1 = DimensionDataChildPortList(
        id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
    child_port_2 = DimensionDataChildPortList(
        id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
    child_ports = [child_port_1, child_port_2]
    # Create the port list
    success = self.driver.ex_create_portlist(
        ex_network_domain=net_domain, name=name,
        description=description,
        port_collection=port_collection,
        child_portlist_list=child_ports
    )
    self.assertTrue(success)
def test_ex_create_portlist_STR(self):
    """A port list can be created with child port lists given as ids.

    Fix: ``port_2`` was a ``DimensionDataIpAddress`` (copy/paste from
    the IP-address-list tests); use ``DimensionDataPort`` for a port
    range.  Comment corrected to match what the test actually does.
    """
    name = "Test_Port_List"
    description = "Test Description"
    net_domain = self.driver.ex_list_network_domains()[0]
    port_1 = DimensionDataPort(begin='8080')
    port_2 = DimensionDataPort(begin='8899', end='9023')
    port_collection = [port_1, port_2]
    child_port_1 = DimensionDataChildPortList(
        id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
    child_port_2 = DimensionDataChildPortList(
        id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
    child_ports_ids = [child_port_1.id, child_port_2.id]
    # Create the port list
    success = self.driver.ex_create_portlist(
        ex_network_domain=net_domain.id, name=name,
        description=description,
        port_collection=port_collection,
        child_portlist_list=child_ports_ids
    )
    self.assertTrue(success)
def test_ex_edit_portlist(self):
    """A port list can be edited via a port-list object.

    Fix: ``port_2`` was a ``DimensionDataIpAddress`` (copy/paste from
    the IP-address-list tests); use ``DimensionDataPort``.  The stale
    ``Create IP Address List`` comment is corrected — this test edits
    a port list.
    """
    net_domain = self.driver.ex_list_network_domains()[0]
    portlist = self.driver.ex_list_portlist(net_domain)[0]
    description = "Test Description"
    port_1 = DimensionDataPort(begin='8080')
    port_2 = DimensionDataPort(begin='8899', end='9023')
    port_collection = [port_1, port_2]
    child_port_1 = DimensionDataChildPortList(
        id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
    child_port_2 = DimensionDataChildPortList(
        id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
    child_ports = [child_port_1.id, child_port_2.id]
    # Edit the port list
    success = self.driver.ex_edit_portlist(
        ex_portlist=portlist,
        description=description,
        port_collection=port_collection,
        child_portlist_list=child_ports
    )
    self.assertTrue(success)
def test_ex_edit_portlist_STR(self):
    """A port list can be edited via its id string.

    Fix: ``port_2`` was a ``DimensionDataIpAddress`` (copy/paste from
    the IP-address-list tests); use ``DimensionDataPort``.  Comment
    corrected — this test edits, not creates.
    """
    portlist_id = "484174a2-ae74-4658-9e56-50fc90e086cf"
    description = "Test Description"
    port_1 = DimensionDataPort(begin='8080')
    port_2 = DimensionDataPort(begin='8899', end='9023')
    port_collection = [port_1, port_2]
    child_port_1 = DimensionDataChildPortList(
        id="333174a2-ae74-4658-9e56-50fc90e086cf", name='test port 1')
    child_port_2 = DimensionDataChildPortList(
        id="311174a2-ae74-4658-9e56-50fc90e04444", name='test port 2')
    child_ports_ids = [child_port_1.id, child_port_2.id]
    # Edit the port list
    success = self.driver.ex_edit_portlist(
        ex_portlist=portlist_id,
        description=description,
        port_collection=port_collection,
        child_portlist_list=child_ports_ids
    )
    self.assertTrue(success)
def test_ex_delete_portlist(self):
    """A port list can be deleted via a port-list object."""
    domain = self.driver.ex_list_network_domains()[0]
    target = self.driver.ex_list_portlist(domain)[0]
    self.assertTrue(self.driver.ex_delete_portlist(ex_portlist=target))
def test_ex_delete_portlist_STR(self):
    """A port list can be deleted via its id string."""
    domain = self.driver.ex_list_network_domains()[0]
    target = self.driver.ex_list_portlist(domain)[0]
    self.assertTrue(self.driver.ex_delete_portlist(ex_portlist=target.id))
def test_import_image(self):
    """An OVF package imports successfully with a cluster id and tags."""
    tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
    imported = self.driver.import_image(
        ovf_package_name='aTestGocToNGoc2_export2.mf',
        name='Libcloud NGOCImage_New 2',
        description='test',
        cluster_id='QA1_N2_VMWARE_1-01',
        is_guest_os_customization='false',
        tagkey_name_value_dictionaries=tag_dictionaries)
    self.assertTrue(imported)
def test_import_image_error_too_many_choice(self):
    """Supplying both cluster_id and datacenter_id raises ValueError."""
    tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
    with self.assertRaises(ValueError):
        self.driver.import_image(
            ovf_package_name='aTestGocToNGoc2_export2.mf',
            name='Libcloud NGOCImage_New 2',
            description='test',
            cluster_id='QA1_N2_VMWARE_1-01',
            datacenter_id='QA1_N1_VMWARE_1',
            is_guest_os_customization='false',
            tagkey_name_value_dictionaries=tag_dictionaries)
def test_import_image_error_missing_choice(self):
    """Omitting both cluster_id and datacenter_id raises ValueError."""
    tag_dictionaries = {'tagkey1_name': 'dev test', 'tagkey2_name': None}
    with self.assertRaises(ValueError):
        self.driver.import_image(
            ovf_package_name='aTestGocToNGoc2_export2.mf',
            name='Libcloud NGOCImage_New 2',
            description='test',
            cluster_id=None,
            datacenter_id=None,
            is_guest_os_customization='false',
            tagkey_name_value_dictionaries=tag_dictionaries)
def test_exchange_nic_vlans(self):
    """VLANs can be exchanged between two NICs."""
    swapped = self.driver.ex_exchange_nic_vlans(
        nic_id_1='a4b4b42b-ccb5-416f-b052-ce7cb7fdff12',
        nic_id_2='b39d09b8-ea65-424a-8fa6-c6f5a98afc69')
    self.assertTrue(swapped)
def test_change_nic_network_adapter(self):
    """A NIC's network adapter type can be changed."""
    changed = self.driver.ex_change_nic_network_adapter(
        nic_id='0c55c269-20a5-4fec-8054-22a245a48fe4',
        network_adapter_name='E1000')
    self.assertTrue(changed)
class InvalidRequestError(Exception):
    """Raised by mock handlers when a request body has the wrong root tag."""

    def __init__(self, tag):
        message = "Invalid Request - %s" % tag
        super(InvalidRequestError, self).__init__(message)
class DimensionDataMockRawResponse(MockRawResponse):
    """Raw (streamed) HTTP responses for the CSV reporting endpoints."""

    fixtures = ComputeFileFixtures('dimensiondata')

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usage(self, method, url, body, headers):
        """Summary usage report CSV."""
        # NOTE(review): the original returns BAD_REQUEST with the CSV
        # body; preserved as-is — the driver evidently reads the body.
        csv_data = self.fixtures.load('summary_usage_report.csv')
        return (httplib.BAD_REQUEST, csv_data, {},
                httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usageDetailed(self, method, url, body, headers):
        """Detailed usage report CSV."""
        csv_data = self.fixtures.load('detailed_usage_report.csv')
        return (httplib.BAD_REQUEST, csv_data, {},
                httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_auditlog(self, method, url, body, headers):
        """Audit log CSV."""
        csv_data = self.fixtures.load('audit_log.csv')
        return (httplib.BAD_REQUEST, csv_data, {},
                httplib.responses[httplib.OK])
class DimensionDataMockHttp(StorageMockHttp, MockHttp):
fixtures = ComputeFileFixtures('dimensiondata')
def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
    """Simulate a failed authentication with an empty body."""
    return (httplib.UNAUTHORIZED, "", {},
            httplib.responses[httplib.UNAUTHORIZED])
def _oec_0_9_myaccount(self, method, url, body, headers):
    """Return the account-details fixture."""
    account_xml = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, account_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
    """Account fixture for the INPROGRESS mock type."""
    account_xml = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, account_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_PAGINATED(self, method, url, body, headers):
    """Account fixture for the PAGINATED mock type."""
    account_xml = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, account_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_ALLFILTERS(self, method, url, body, headers):
    """Account fixture for the ALLFILTERS mock type."""
    account_xml = self.fixtures.load('oec_0_9_myaccount.xml')
    return (httplib.OK, account_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_image(self, method, url, body, headers):
    """Return the base-image catalogue fixture."""
    image_xml = self.fixtures.load('oec_0_9_base_image.xml')
    return (httplib.OK, image_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
    """Return the base-image fixture that includes disk speeds."""
    image_xml = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
    return (httplib.OK, image_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
    """Return the deployed-servers fixture."""
    servers_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
    return (httplib.OK, servers_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
    """Return the pending-deploy servers fixture."""
    servers_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
    return (httplib.OK, servers_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
    """Return the datacenter listing fixture."""
    dc_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
    return (httplib.OK, dc_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
    """Map the ?action query string onto the matching server-11 fixture.

    Unknown actions fall through with a ``None`` body, as before.
    """
    fixture_prefix = 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_'
    action = url.split('?')[-1]
    body = None
    if action in ('restart', 'shutdown', 'delete', 'start', 'poweroff'):
        body = self.fixtures.load(fixture_prefix + action + '.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
    """Like server_11, but each action responds with a busy BAD_REQUEST."""
    fixture_prefix = 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_'
    action = url.split('?')[-1]
    body = None
    if action in ('restart', 'shutdown', 'delete', 'start', 'poweroff'):
        body = self.fixtures.load(
            fixture_prefix + action + '_INPROGRESS.xml')
    return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
    """Return the legacy server-listing fixture."""
    # NOTE(review): the fixture filename really does start with an
    # underscore — preserved verbatim.
    servers_xml = self.fixtures.load(
        '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
    return (httplib.OK, servers_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers):
    """Validate POSTed network-creation bodies, then serve the fixture.

    Fix: the original tested ``method is "POST"`` — an identity
    comparison against a string literal, which only works by accident
    of CPython string interning.  Use ``==`` for a reliable equality
    check.

    :raises InvalidRequestError: when a POST body's root element is not
        ``NewNetworkWithLocation``.
    """
    if method == "POST":
        request = ET.fromstring(body)
        if request.tag != "{http://oec.api.opsource.net/schemas/network}NewNetworkWithLocation":
            raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation_NA9(self, method, url, body, headers):
    """Return the network-with-location fixture for the NA9 location."""
    network_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
    return (httplib.OK, network_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e(self, method, url, body, headers):
    """Return the single-network detail fixture."""
    network_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml')
    return (httplib.OK, network_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize(self, method, url, body, headers):
    """Return the disk change-size success fixture."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed(self, method, url, body, headers):
    """Return the disk change-speed success fixture."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1(self, method, url, body, headers):
    """Serve the disk-delete fixture when ``?delete`` is requested.

    For any other action the incoming request ``body`` is echoed back
    unchanged (the parameter is reused), matching the original.
    """
    query_action = url.split('?')[-1]
    if query_action == 'delete':
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
    """Serve the GET detail or POST action fixture for this server.

    Other HTTP methods fall through and return ``None`` implicitly,
    matching the original.
    """
    if method == 'GET':
        detail_xml = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, detail_xml, {}, httplib.responses[httplib.OK])
    if method == 'POST':
        action_xml = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml')
        return (httplib.OK, action_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule(self, method, url, body, headers):
    """Return the anti-affinity rule creation success fixture."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_FAIL_EXISTING(self, method, url, body, headers):
    """Fail rule creation with BAD_REQUEST (rule already exists)."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml')
    return (httplib.BAD_REQUEST, response_xml, {},
            httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418(self, method, url, body, headers):
    """Return the anti-affinity rule deletion success fixture."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418_FAIL(self, method, url, body, headers):
    """Fail rule deletion with BAD_REQUEST."""
    response_xml = self.fixtures.load(
        'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml')
    return (httplib.BAD_REQUEST, response_xml, {},
            httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
    """Return the CaaS 2.4 single-server fixture."""
    server_xml = self.fixtures.load('server.xml')
    return (httplib.OK, server_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers):
    """Validate the deleteServer request body and return success."""
    root = ET.fromstring(body)
    if root.tag != "{urn:didata.com:api:cloud:types}deleteServer":
        raise InvalidRequestError(root.tag)
    response_xml = self.fixtures.load('server_deleteServer.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers):
    """Validate the deleteServer body, then report the resource busy."""
    root = ET.fromstring(body)
    if root.tag != "{urn:didata.com:api:cloud:types}deleteServer":
        raise InvalidRequestError(root.tag)
    response_xml = self.fixtures.load('server_deleteServer_RESOURCEBUSY.xml')
    return (httplib.BAD_REQUEST, response_xml, {},
            httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers):
    """Validate the rebootServer request body and return success."""
    root = ET.fromstring(body)
    if root.tag != "{urn:didata.com:api:cloud:types}rebootServer":
        raise InvalidRequestError(root.tag)
    response_xml = self.fixtures.load('server_rebootServer.xml')
    return (httplib.OK, response_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers):
    """Validate the rebootServer body, then report the resource busy."""
    root = ET.fromstring(body)
    if root.tag != "{urn:didata.com:api:cloud:types}rebootServer":
        raise InvalidRequestError(root.tag)
    response_xml = self.fixtures.load('server_rebootServer_RESOURCEBUSY.xml')
    return (httplib.BAD_REQUEST, response_xml, {},
            httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
    """Serve the NA3-filtered listing when requested, else the default."""
    if url.endswith('datacenterId=NA3'):
        fixture_name = '2.4/server_server_NA3.xml'
    else:
        fixture_name = '2.4/server_server.xml'
    listing_xml = self.fixtures.load(fixture_name)
    return (httplib.OK, listing_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGESIZE50(self, method, url, body, headers):
    """Assert the pageSize=50 query parameter, then serve the listing."""
    if not url.endswith('pageSize=50'):
        raise ValueError("pageSize is not set as expected")
    listing_xml = self.fixtures.load('2.4/server_server.xml')
    return (httplib.OK, listing_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_EMPTY(self, method, url, body, headers):
    """Serve an empty, paginated server listing."""
    empty_xml = self.fixtures.load('server_server_paginated_empty.xml')
    return (httplib.OK, empty_xml, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGED_THEN_EMPTY(self, method, url, body, headers):
        """Serve one populated page, then an empty page for pageNumber=2."""
        if 'pageNumber=2' in url:
            fixture = 'server_server_paginated_empty.xml'
        else:
            fixture = '2.4/server_server_paginated.xml'
        payload = self.fixtures.load(fixture)
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATED(self, method, url, body, headers):
        """Serve a paginated first page and the plain list for pageNumber=2."""
        if 'pageNumber=2' in url:
            fixture = '2.4/server_server.xml'
        else:
            fixture = '2.4/server_server_paginated.xml'
        payload = self.fixtures.load(fixture)
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATEDEMPTY(self, method, url, body, headers):
        """Serve an empty paginated server list."""
        payload = self.fixtures.load('server_server_paginated_empty.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS(self, method, url, body, headers):
        """Assert every supported server filter appears with its fake value, then serve the list."""
        expected = {
            'datacenterId': 'fake_loc',
            'networkId': 'fake_network',
            'networkDomainId': 'fake_network_domain',
            'vlanId': 'fake_vlan',
            'ipv6': 'fake_ipv6',
            'privateIpv4': 'fake_ipv4',
            'name': 'fake_name',
            'state': 'fake_state',
            'started': 'True',
            'deployed': 'True',
            'sourceImageId': 'fake_image',
        }
        (_, query) = url.split('?')
        for param in query.split('&'):
            (key, value) = param.split('=')
            if key not in expected:
                raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
            assert value == expected[key]
        payload = self.fixtures.load('2.4/server_server.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule(self, method, url, body, headers):
        """Serve the anti-affinity rule list fixture."""
        payload = self.fixtures.load('server_antiAffinityRule_list.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_ALLFILTERS(self, method, url, body, headers):
        """Assert every supported anti-affinity filter, then serve the rule list.

        networkDomainId is accepted but its value is not checked (None entry).
        """
        expected = {
            'id': 'FAKE_ID',
            'state': 'FAKE_STATE',
            'pageSize': '250',
            'networkDomainId': None,
        }
        (_, query) = url.split('?')
        for param in query.split('&'):
            (key, value) = param.split('=')
            if key not in expected:
                raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
            if expected[key] is not None:
                assert value == expected[key]
        payload = self.fixtures.load('server_antiAffinityRule_list.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_PAGINATED(self, method, url, body, headers):
        """Serve a paginated first page; the full list for pageNumber=2."""
        if 'pageNumber=2' in url:
            fixture = 'server_antiAffinityRule_list.xml'
        else:
            fixture = 'server_antiAffinityRule_list_PAGINATED.xml'
        payload = self.fixtures.load(fixture)
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers):
        """List datacenters; serve the NA9-specific fixture when filtered by id."""
        if url.endswith('id=NA9'):
            payload = self.fixtures.load('infrastructure_datacenter_NA9.xml')
        else:
            payload = self.fixtures.load('infrastructure_datacenter.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter_ALLFILTERS(self, method, url, body, headers):
        """List datacenters with filters; serve the NA9 fixture when filtered by id."""
        if url.endswith('id=NA9'):
            payload = self.fixtures.load('infrastructure_datacenter_NA9.xml')
        else:
            payload = self.fixtures.load('infrastructure_datacenter.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_updateVmwareTools(self, method, url, body, headers):
        """Validate an <updateVmwareTools> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}updateVmwareTools":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_updateVmwareTools.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers):
        """Validate a <startServer> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}startServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_startServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers):
        """Validate a <startServer> request, then simulate an in-progress failure (400)."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}startServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_startServer_INPROGRESS.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers):
        """Validate a <shutdownServer> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}shutdownServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_shutdownServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers):
        """Validate a <shutdownServer> request, then simulate an in-progress failure (400)."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}shutdownServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_shutdownServer_INPROGRESS.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers):
        """Validate a <resetServer> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}resetServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_resetServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers):
        """Validate a <powerOffServer> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}powerOffServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_powerOffServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, method, url, body, headers):
        """Validate a <powerOffServer> request, then simulate an in-progress failure (400)."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}powerOffServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_powerOffServer_INPROGRESS.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_11_INPROGRESS(self, method, url, body, headers):
        """Simulate a busy server lookup: 400 with the GetServer fixture."""
        payload = self.fixtures.load('2.4/server_GetServer.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(self, method, url, body, headers):
        """Serve the network domain list fixture."""
        payload = self.fixtures.load('network_networkDomain.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_ALLFILTERS(self, method, url, body, headers):
        """Assert every supported network-domain filter, then serve the list."""
        expected = {
            'datacenterId': 'fake_location',
            'type': 'fake_plan',
            'name': 'fake_name',
            'state': 'fake_state',
        }
        (_, query) = url.split('?')
        for param in query.split('&'):
            (key, value) = param.split('=')
            if key not in expected:
                raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
            assert value == expected[key]
        payload = self.fixtures.load('network_networkDomain.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(self, method, url, body, headers):
        """Serve the VLAN list fixture."""
        payload = self.fixtures.load('network_vlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_ALLFILTERS(self, method, url, body, headers):
        """Assert every supported VLAN filter, then serve the VLAN list."""
        expected = {
            'datacenterId': 'fake_location',
            'networkDomainId': 'fake_network_domain',
            'ipv6Address': 'fake_ipv6',
            'privateIpv4Address': 'fake_ipv4',
            'name': 'fake_name',
            'state': 'fake_state',
        }
        (_, query) = url.split('?')
        for param in query.split('&'):
            (key, value) = param.split('=')
            if key not in expected:
                raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
            assert value == expected[key]
        payload = self.fixtures.load('network_vlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(self, method, url, body, headers):
        """Validate a <deployServer> request, then serve the deploy fixture.

        The request must describe networking exactly one way:
        MCP1 via a <network> element carrying privateIpv4 or networkId, or
        MCP2 via <networkInfo> whose <primaryNic> carries privateIpv4 or vlanId.
        Raises InvalidRequestError on any violation.
        """
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deployServer":
            raise InvalidRequestError(request.tag)
        network = request.find(fixxpath('network', TYPES_URN))
        network_info = request.find(fixxpath('networkInfo', TYPES_URN))
        # Hoisted both-present check; the original repeated it inside the
        # elif branch where it was unreachable (elif implies network is None).
        if network is not None and network_info is not None:
            raise InvalidRequestError("Request has both MCP1 and MCP2 values")
        if network is not None:
            ipv4 = findtext(network, 'privateIpv4', TYPES_URN)
            networkId = findtext(network, 'networkId', TYPES_URN)
            if ipv4 is None and networkId is None:
                raise InvalidRequestError('Invalid request MCP1 requests need privateIpv4 or networkId')
        elif network_info is not None:
            primary_nic = network_info.find(fixxpath('primaryNic', TYPES_URN))
            ipv4 = findtext(primary_nic, 'privateIpv4', TYPES_URN)
            vlanId = findtext(primary_nic, 'vlanId', TYPES_URN)
            if ipv4 is None and vlanId is None:
                raise InvalidRequestError('Invalid request MCP2 requests need privateIpv4 or vlanId')
        else:
            raise InvalidRequestError('Invalid request, does not have network or network_info in XML')
        body = self.fixtures.load(
            'server_deployServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
        """Serve the single-server fixture for id e75ead52-...."""
        payload = self.fixtures.load('2.4/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain(self, method, url, body, headers):
        """Validate a <deployNetworkDomain> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deployNetworkDomain":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deployNetworkDomain.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be(self, method, url, body, headers):
        """Serve the single network-domain fixture for id 8cdfd607-...."""
        payload = self.fixtures.load('network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be_ALLFILTERS(self, method, url, body, headers):
        """Serve the single network-domain fixture regardless of filters."""
        payload = self.fixtures.load('network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain(self, method, url, body, headers):
        """Validate an <editNetworkDomain> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}editNetworkDomain":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_editNetworkDomain.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain(self, method, url, body, headers):
        """Validate a <deleteNetworkDomain> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deleteNetworkDomain":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deleteNetworkDomain.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan(self, method, url, body, headers):
        """Validate a <deployVlan> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deployVlan":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deployVlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8(self, method, url, body, headers):
        """Serve the single VLAN fixture for id 0e56433f-...."""
        payload = self.fixtures.load('network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan(self, method, url, body, headers):
        """Validate an <editVlan> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}editVlan":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_editVlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan(self, method, url, body, headers):
        """Validate a <deleteVlan> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deleteVlan":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deleteVlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan(self, method, url, body, headers):
        """Validate an <expandVlan> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}expandVlan":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_expandVlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock(self, method, url, body, headers):
        """Validate an <addPublicIpBlock> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}addPublicIpBlock":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_addPublicIpBlock.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba(self, method, url, body, headers):
        """Serve the public IP block fixture for id 4487241a-...."""
        payload = self.fixtures.load('network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock(self, method, url, body, headers):
        """Serve the public IP block list fixture."""
        payload = self.fixtures.load('network_publicIpBlock.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8(self, method, url, body, headers):
        """Serve the public IP block fixture for id 9945dc4a-...."""
        payload = self.fixtures.load('network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock(self, method, url, body, headers):
        """Validate a <removePublicIpBlock> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}removePublicIpBlock":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_removePublicIpBlock.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule(self, method, url, body, headers):
        """Serve the firewall rule list fixture."""
        payload = self.fixtures.load('network_firewallRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule(self, method, url, body, headers):
        """Validate a <createFirewallRule> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}createFirewallRule":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_createFirewallRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c(self, method, url, body, headers):
        """Serve the firewall rule fixture for id d0a20f59-...."""
        payload = self.fixtures.load('network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule(self, method, url, body, headers):
        """Validate an <editFirewallRule> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}editFirewallRule":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_editFirewallRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule(self, method, url, body, headers):
        """Validate a <deleteFirewallRule> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deleteFirewallRule":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deleteFirewallRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule(self, method, url, body, headers):
        """Validate a <createNatRule> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}createNatRule":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_createNatRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule(self, method, url, body, headers):
        """Serve the NAT rule list fixture."""
        payload = self.fixtures.load('network_natRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce(self, method, url, body, headers):
        """Serve the NAT rule fixture for id 2187a636-...."""
        payload = self.fixtures.load('network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule(self, method, url, body, headers):
        """Validate a <deleteNatRule> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deleteNatRule":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('network_deleteNatRule.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic(self, method, url, body, headers):
        """Validate an <addNic> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}addNic":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_addNic.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic(self, method, url, body, headers):
        """Validate a <removeNic> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}removeNic":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_removeNic.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_disableServerMonitoring(self, method, url, body, headers):
        """Validate a <disableServerMonitoring> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}disableServerMonitoring":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_disableServerMonitoring.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_enableServerMonitoring(self, method, url, body, headers):
        """Validate an <enableServerMonitoring> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}enableServerMonitoring":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_enableServerMonitoring.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeServerMonitoringPlan(self, method, url, body, headers):
        """Validate a <changeServerMonitoringPlan> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}changeServerMonitoringPlan":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_changeServerMonitoringPlan.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage(self, method, url, body, headers):
        """Serve the OS image list fixture."""
        payload = self.fixtures.load('2.4/image_osImage.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c(self, method, url, body, headers):
        """Serve the OS image fixture for id c14b1a46-...."""
        payload = self.fixtures.load('2.4/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a(self, method, url, body, headers):
        """Serve the OS image fixture for id 6b4fb0c7-...."""
        payload = self.fixtures.load('2.4/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(self, method, url, body, headers):
        """This id is not an OS image: serve the 400 fixture."""
        payload = self.fixtures.load('image_osImage_BAD_REQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(self, method, url, body, headers):
        """This id is not an OS image: serve the 400 fixture."""
        payload = self.fixtures.load('image_osImage_BAD_REQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_FAKE_IMAGE_ID(self, method, url, body, headers):
        """An unknown OS image id: serve the 400 fixture."""
        payload = self.fixtures.load('image_osImage_BAD_REQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage(self, method, url, body, headers):
        """Serve the customer image list fixture."""
        payload = self.fixtures.load('2.4/image_customerImage.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(self, method, url, body, headers):
        """Serve the customer image fixture for id 5234e5c7-...."""
        payload = self.fixtures.load('2.4/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(self, method, url, body, headers):
        """Serve the customer image fixture for id 2ffa36c8-...."""
        payload = self.fixtures.load('2.4/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_FAKE_IMAGE_ID(self, method, url, body, headers):
        """An unknown customer image id: serve the 400 fixture."""
        payload = self.fixtures.load('image_customerImage_BAD_REQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_reconfigureServer(self, method, url, body, headers):
        """Validate a <reconfigureServer> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}reconfigureServer":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('server_reconfigureServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cleanServer(self, method, url, body, headers):
        """Serve the cleanServer success fixture."""
        payload = self.fixtures.load('server_cleanServer.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addDisk(self, method, url, body, headers):
        """Serve the addDisk success fixture."""
        payload = self.fixtures.load('server_addDisk.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeDisk(self, method, url, body, headers):
        """Serve the removeDisk success fixture."""
        payload = self.fixtures.load('server_removeDisk.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey(self, method, url, body, headers):
        """Validate a default <createTagKey> request (name only), then serve the fixture."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
            raise InvalidRequestError(request.tag)
        if findtext(request, 'name', TYPES_URN) is None:
            raise ValueError("Name must have a value in the request")
        if findtext(request, 'description', TYPES_URN) is not None:
            raise ValueError("Default description for a tag should be blank")
        # A missing element (None) fails the comparison just like a wrong value.
        if findtext(request, 'valueRequired', TYPES_URN) != 'true':
            raise ValueError("Default valueRequired should be true")
        if findtext(request, 'displayOnReport', TYPES_URN) != 'true':
            raise ValueError("Default displayOnReport should be true")
        payload = self.fixtures.load('tag_createTagKey.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_ALLPARAMS(self, method, url, body, headers):
        """Validate a fully-specified <createTagKey> request, then serve the fixture."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
            raise InvalidRequestError(request.tag)
        if findtext(request, 'name', TYPES_URN) is None:
            raise ValueError("Name must have a value in the request")
        if findtext(request, 'description', TYPES_URN) is None:
            raise ValueError("Description should have a value")
        # A missing element (None) fails the comparison just like a wrong value.
        if findtext(request, 'valueRequired', TYPES_URN) != 'false':
            raise ValueError("valueRequired should be false")
        if findtext(request, 'displayOnReport', TYPES_URN) != 'false':
            raise ValueError("displayOnReport should be false")
        payload = self.fixtures.load('tag_createTagKey.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_BADREQUEST(self, method, url, body, headers):
        """Simulate a rejected createTagKey: serve the 400 fixture."""
        payload = self.fixtures.load('tag_createTagKey_BADREQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey(self, method, url, body, headers):
        """Serve the tag key list fixture."""
        payload = self.fixtures.load('tag_tagKey_list.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_SINGLE(self, method, url, body, headers):
        """Serve the single-entry tag key list fixture."""
        payload = self.fixtures.load('tag_tagKey_list_SINGLE.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_ALLFILTERS(self, method, url, body, headers):
        """Assert every supported tag key filter, then serve the tag key list."""
        expected = {
            'id': 'fake_id',
            'name': 'fake_name',
            'valueRequired': 'false',
            'displayOnReport': 'false',
            'pageSize': '250',
        }
        (_, query) = url.split('?')
        for param in query.split('&'):
            (key, value) = param.split('=')
            if key not in expected:
                raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
            assert value == expected[key]
        payload = self.fixtures.load('tag_tagKey_list.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075(self, method, url, body, headers):
        """Serve the single tag key fixture for this id."""
        payload = self.fixtures.load('tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075_NOEXIST(self, method, url, body, headers):
        """Simulate a missing tag key: serve the 400 fixture."""
        payload = self.fixtures.load('tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NAME(self, method, url, body, headers):
        """Validate an <editTagKey> request that changes only the name."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
            raise InvalidRequestError(request.tag)
        if findtext(request, 'name', TYPES_URN) is None:
            raise ValueError("Name must have a value in the request")
        if findtext(request, 'description', TYPES_URN) is not None:
            raise ValueError("Description should be empty")
        if findtext(request, 'valueRequired', TYPES_URN) is not None:
            raise ValueError("valueRequired should be empty")
        if findtext(request, 'displayOnReport', TYPES_URN) is not None:
            raise ValueError("displayOnReport should be empty")
        payload = self.fixtures.load('tag_editTagKey.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOTNAME(self, method, url, body, headers):
        """Validate an <editTagKey> request that changes everything but the name."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
            raise InvalidRequestError(request.tag)
        if findtext(request, 'name', TYPES_URN) is not None:
            raise ValueError("Name should be empty")
        if findtext(request, 'description', TYPES_URN) is None:
            raise ValueError("Description should not be empty")
        if findtext(request, 'valueRequired', TYPES_URN) is None:
            raise ValueError("valueRequired should not be empty")
        if findtext(request, 'displayOnReport', TYPES_URN) is None:
            raise ValueError("displayOnReport should not be empty")
        payload = self.fixtures.load('tag_editTagKey.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOCHANGE(self, method, url, body, headers):
        """Simulate an edit with no changes: serve the 400 fixture."""
        payload = self.fixtures.load('tag_editTagKey_BADREQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey(self, method, url, body, headers):
        """Validate a <deleteTagKey> request, then serve the success fixture."""
        tag = ET.fromstring(body).tag
        if tag != "{urn:didata.com:api:cloud:types}deleteTagKey":
            raise InvalidRequestError(tag)
        payload = self.fixtures.load('tag_deleteTagKey.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey_NOEXIST(self, method, url, body, headers):
        """Simulate deleting a missing tag key: serve the 400 fixture."""
        payload = self.fixtures.load('tag_deleteTagKey_BADREQUEST.xml')
        return httplib.BAD_REQUEST, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags(self, method, url, body, headers):
        """Validate an <applyTags> request where the tag carries a value."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
            raise InvalidRequestError(request.tag)
        tag = request.find(fixxpath('tag', TYPES_URN))
        required = (
            (findtext(request, 'assetType', TYPES_URN), "assetType should not be empty"),
            (findtext(request, 'assetId', TYPES_URN), "assetId should not be empty"),
            (findtext(tag, 'tagKeyName', TYPES_URN), "tagKeyName should not be empty"),
            (findtext(tag, 'value', TYPES_URN), "value should not be empty"),
        )
        for text, message in required:
            if text is None:
                raise ValueError(message)
        payload = self.fixtures.load('tag_applyTags.xml')
        return httplib.OK, payload, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOVALUE(self, method, url, body, headers):
    # Mock applyTags endpoint for the value-less variant: same validation as
    # the normal applyTags mock, except the tag must NOT carry a value
    # (the final check is intentionally inverted).
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
        raise InvalidRequestError(request.tag)
    asset_type = findtext(request, 'assetType', TYPES_URN)
    asset_id = findtext(request, 'assetId', TYPES_URN)
    tag = request.find(fixxpath('tag', TYPES_URN))
    tag_key_name = findtext(tag, 'tagKeyName', TYPES_URN)
    value = findtext(tag, 'value', TYPES_URN)
    if asset_type is None:
        raise ValueError("assetType should not be empty")
    if asset_id is None:
        raise ValueError("assetId should not be empty")
    if tag_key_name is None:
        raise ValueError("tagKeyName should not be empty")
    if value is not None:
        raise ValueError("value should be empty")
    body = self.fixtures.load(
        'tag_applyTags.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOTAGKEY(self, method, url, body, headers):
body = self.fixtures.load(
'tag_applyTags_BADREQUEST.xml'
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags(self, method, url, body, headers):
    # Mock removeTags endpoint: verify the namespaced root element and
    # return the canned success fixture.
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}removeTags":
        raise InvalidRequestError(request.tag)
    body = self.fixtures.load(
        'tag_removeTag.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags_NOTAG(self, method, url, body, headers):
body = self.fixtures.load(
'tag_removeTag_BADREQUEST.xml'
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag(self, method, url, body, headers):
body = self.fixtures.load(
'tag_tag_list.xml'
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag_ALLPARAMS(self, method, url, body, headers):
    # Mock tag listing endpoint that checks every supported query parameter
    # carries the exact fake value the test driver sent; any unknown
    # parameter is rejected.  Note: uses plain ``assert`` — acceptable in a
    # test mock, but these checks vanish under ``python -O``.
    (_, params) = url.split('?')
    parameters = params.split('&')
    for parameter in parameters:
        (key, value) = parameter.split('=')
        if key == 'assetId':
            assert value == 'fake_asset_id'
        elif key == 'assetType':
            assert value == 'fake_asset_type'
        elif key == 'valueRequired':
            assert value == 'false'
        elif key == 'displayOnReport':
            assert value == 'false'
        elif key == 'pageSize':
            assert value == '250'
        elif key == 'datacenterId':
            assert value == 'fake_location'
        elif key == 'value':
            assert value == 'fake_value'
        elif key == 'tagKeyName':
            assert value == 'fake_tag_key_name'
        elif key == 'tagKeyId':
            assert value == 'fake_tag_key_id'
        else:
            raise ValueError("Could not find in url parameters {0}:{1}".format(key, value))
    body = self.fixtures.load(
        'tag_tag_list.xml'
    )
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList(
self, method, url, body, headers):
body = self.fixtures.load('ip_address_lists.xml')
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList_FILTERBYNAME(
self, method, url, body, headers):
body = self.fixtures.load('ip_address_lists_FILTERBYNAME.xml')
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createIpAddressList(
        self, method, url, body, headers):
    """Mock createIpAddressList endpoint.

    Validates the XML body (root tag, mandatory networkDomainId, name and
    ipVersion, and at least one ipAddress or childIpAddressListId element)
    before returning the success fixture.
    """
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "createIpAddressList":
        raise InvalidRequestError(request.tag)
    net_domain = findtext(request, 'networkDomainId', TYPES_URN)
    if net_domain is None:
        raise ValueError("Network Domain should not be empty")
    name = findtext(request, 'name', TYPES_URN)
    if name is None:
        raise ValueError("Name should not be empty")
    ip_version = findtext(request, 'ipVersion', TYPES_URN)
    if ip_version is None:
        raise ValueError("IP Version should not be empty")
    ip_address_col_required = findall(request, 'ipAddress', TYPES_URN)
    child_ip_address_required = findall(request, 'childIpAddressListId',
                                        TYPES_URN)
    if 0 == len(ip_address_col_required) and \
            0 == len(child_ip_address_required):
        raise ValueError("At least one ipAddress element or "
                         "one childIpAddressListId element must be "
                         "provided.")
    # Bug fix: only inspect ipAddress[0] when an ipAddress element exists.
    # Previously a request carrying only childIpAddressListId elements
    # raised IndexError here instead of being accepted.
    if ip_address_col_required and \
            ip_address_col_required[0].get('begin') is None:
        raise ValueError("IP Address should not be empty")
    body = self.fixtures.load(
        'ip_address_list_create.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editIpAddressList(
        self, method, url, body, headers):
    """Mock editIpAddressList endpoint.

    Requires an ``id`` attribute, rejects name/ipVersion changes (they may
    not appear in an edit request), and needs at least one ipAddress or
    childIpAddressListId element before returning the success fixture.
    """
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "editIpAddressList":
        raise InvalidRequestError(request.tag)
    ip_address_list = request.get('id')
    if ip_address_list is None:
        raise ValueError("IpAddressList ID should not be empty")
    name = findtext(request, 'name', TYPES_URN)
    if name is not None:
        raise ValueError("Name should not exists in request")
    ip_version = findtext(request, 'ipVersion', TYPES_URN)
    if ip_version is not None:
        raise ValueError("IP Version should not exists in request")
    ip_address_col_required = findall(request, 'ipAddress', TYPES_URN)
    child_ip_address_required = findall(request, 'childIpAddressListId',
                                        TYPES_URN)
    if 0 == len(ip_address_col_required) and \
            0 == len(child_ip_address_required):
        raise ValueError("At least one ipAddress element or "
                         "one childIpAddressListId element must be "
                         "provided.")
    # Bug fix: guard the index — a request with only childIpAddressListId
    # elements previously raised IndexError instead of being accepted.
    if ip_address_col_required and \
            ip_address_col_required[0].get('begin') is None:
        raise ValueError("IP Address should not be empty")
    body = self.fixtures.load(
        'ip_address_list_edit.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteIpAddressList(
        self, method, url, body, headers):
    # Mock deleteIpAddressList endpoint: verify the root element and the
    # presence of an ``id`` attribute, then return the success fixture.
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "deleteIpAddressList":
        raise InvalidRequestError(request.tag)
    ip_address_list = request.get('id')
    if ip_address_list is None:
        raise ValueError("IpAddressList ID should not be empty")
    body = self.fixtures.load(
        'ip_address_list_delete.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList(
self, method, url, body, headers):
body = self.fixtures.load(
'port_list_lists.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList_c8c92ea3_2da8_4d51_8153_f39bec794d69(
self, method, url, body, headers):
body = self.fixtures.load(
'port_list_get.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createPortList(
        self, method, url, body, headers):
    """Mock createPortList endpoint.

    Requires a networkDomainId element and at least one port or
    childPortListId element before returning the success fixture.
    """
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "createPortList":
        raise InvalidRequestError(request.tag)
    net_domain = findtext(request, 'networkDomainId', TYPES_URN)
    if net_domain is None:
        raise ValueError("Network Domain should not be empty")
    ports_required = findall(request, 'port', TYPES_URN)
    child_port_list_required = findall(request, 'childPortListId',
                                       TYPES_URN)
    if 0 == len(ports_required) and \
            0 == len(child_port_list_required):
        raise ValueError("At least one port element or one "
                         "childPortListId element must be provided")
    # Bug fix: only inspect port[0] when a port element exists; previously a
    # request with only childPortListId elements raised IndexError.
    if ports_required and ports_required[0].get('begin') is None:
        raise ValueError("PORT begin value should not be empty")
    body = self.fixtures.load(
        'port_list_create.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editPortList(
        self, method, url, body, headers):
    """Mock editPortList endpoint.

    Requires at least one port or childPortListId element before returning
    the success fixture.
    """
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "editPortList":
        raise InvalidRequestError(request.tag)
    ports_required = findall(request, 'port', TYPES_URN)
    child_port_list_required = findall(request, 'childPortListId',
                                       TYPES_URN)
    if 0 == len(ports_required) and \
            0 == len(child_port_list_required):
        raise ValueError("At least one port element or one "
                         "childPortListId element must be provided")
    # Bug fix: guard the index — a request with only childPortListId
    # elements previously raised IndexError instead of being accepted.
    if ports_required and ports_required[0].get('begin') is None:
        raise ValueError("PORT begin value should not be empty")
    body = self.fixtures.load(
        'port_list_edit.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deletePortList(
        self, method, url, body, headers):
    # Mock deletePortList endpoint: verify the root element and the
    # presence of an ``id`` attribute, then return the success fixture.
    # NOTE(review): loads the ip_address_list_delete fixture rather than a
    # port-list-specific one — presumably intentional reuse; confirm.
    request = ET.fromstring(body)
    if request.tag != "{urn:didata.com:api:cloud:types}" \
                      "deletePortList":
        raise InvalidRequestError(request.tag)
    port_list = request.get('id')
    if port_list is None:
        raise ValueError("Port List ID should not be empty")
    body = self.fixtures.load(
        'ip_address_list_delete.xml'
    )
    return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cloneServer(
self, method, url, body, headers):
body = self.fixtures.load(
'2.4/server_clone_response.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_importImage(
self, method, url, body, headers):
body = self.fixtures.load(
'2.4/import_image_response.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_exchangeNicVlans(
self, method, url, body, headers):
body = self.fixtures.load(
'2.4/exchange_nic_vlans_response.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeNetworkAdapter(
self, method, url, body, headers):
body = self.fixtures.load(
'2.4/change_nic_networkadapter_response.xml'
)
return httplib.OK, body, {}, httplib.responses[httplib.OK]
if __name__ == '__main__':
    # Run the test suite and propagate its result as the process exit code.
    sys.exit(unittest.main())
| 48.2
| 159
| 0.649476
| 19,065
| 161,952
| 5.214896
| 0.044637
| 0.044658
| 0.035727
| 0.031381
| 0.889723
| 0.859759
| 0.834564
| 0.801664
| 0.769478
| 0.726871
| 0
| 0.068303
| 0.250661
| 161,952
| 3,359
| 160
| 48.21435
| 0.75095
| 0.007515
| 0
| 0.613906
| 0
| 0
| 0.138402
| 0.081329
| 0
| 0
| 0
| 0
| 0.152341
| 1
| 0.125437
| false
| 0.015374
| 0.010133
| 0.000349
| 0.187631
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22df3396563e93fdf6625a94e86d9045e55e3195
| 30
|
py
|
Python
|
rss2sql/__init__.py
|
alexandre-mbm/rss2sql
|
e5eab86fdb3013d025ed6a1e288c5a25513d0ebd
|
[
"MIT"
] | null | null | null |
rss2sql/__init__.py
|
alexandre-mbm/rss2sql
|
e5eab86fdb3013d025ed6a1e288c5a25513d0ebd
|
[
"MIT"
] | 1
|
2020-09-20T21:42:31.000Z
|
2020-09-20T21:42:31.000Z
|
rss2sql/__init__.py
|
alexandre-mbm/rss2sql
|
e5eab86fdb3013d025ed6a1e288c5a25513d0ebd
|
[
"MIT"
] | 1
|
2020-09-20T19:04:52.000Z
|
2020-09-20T19:04:52.000Z
|
from .rss2sql import SQL, RSS
| 15
| 29
| 0.766667
| 5
| 30
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.166667
| 30
| 1
| 30
| 30
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22f02698265235978ab242dc0d64d1a6cbcdc38f
| 34
|
py
|
Python
|
aoc/d17/__init__.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
aoc/d17/__init__.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
aoc/d17/__init__.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
from aoc.d17.main import p_1, p_2
| 17
| 33
| 0.764706
| 9
| 34
| 2.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0.147059
| 34
| 1
| 34
| 34
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a3bad4667037c2152beffb0d6b3aecc038860f5c
| 13,220
|
py
|
Python
|
pandapower/test/api/test_std_types.py
|
mathildebadoual/pandapower
|
9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc
|
[
"BSD-3-Clause"
] | 1
|
2020-10-19T06:39:15.000Z
|
2020-10-19T06:39:15.000Z
|
pandapower/test/api/test_std_types.py
|
miek770/pandapower
|
de004efc1b7432a633792af4f551f7635a02db47
|
[
"BSD-3-Clause"
] | null | null | null |
pandapower/test/api/test_std_types.py
|
miek770/pandapower
|
de004efc1b7432a633792af4f551f7635a02db47
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import pandapower as pp
def test_create_and_load_std_type_line():
    """Creating a line std type requires all four electrical parameters.

    Each progressively larger but still incomplete parameter dict must
    raise UserWarning; the complete dict is stored on the network and can
    be loaded back unchanged.
    """
    net = pp.create_empty_network()
    c = 40
    r = 0.01
    x = 0.02
    i = 0.2
    name = "test_line"
    typdata = {}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="line")
    typdata = {"c_nf_per_km": c}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="line")
    typdata = {"c_nf_per_km": c, "r_ohm_per_km": r}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="line")
    typdata = {"c_nf_per_km": c, "r_ohm_per_km": r, "x_ohm_per_km": x}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="line")
    typdata = {"c_nf_per_km": c, "r_ohm_per_km": r, "x_ohm_per_km": x, "max_i_ka": i}
    pp.create_std_type(net, name=name, data=typdata, element="line")
    assert net.std_types["line"][name] == typdata
    loaded_type = pp.load_std_type(net, name)
    assert loaded_type == typdata
def test_create_std_types_line():
    """create_std_types registers every entry of the data dict as a line type."""
    net = pp.create_empty_network()
    c = 40
    r = 0.01
    x = 0.02
    i = 0.2
    typdata = {"c_nf_per_km": c, "r_ohm_per_km": r, "x_ohm_per_km": x, "max_i_ka": i}
    typdatas = {"typ1": typdata, "typ2": typdata}
    pp.create_std_types(net, data=typdatas, element="line")
    assert net.std_types["line"]["typ1"] == typdata
    # Bug fix: the second assertion previously re-checked "typ1", leaving
    # "typ2" untested (compare with test_create_std_types_trafo).
    assert net.std_types["line"]["typ2"] == typdata
def test_create_std_types_from_net_line():
    """Copying line std types from one network to another preserves entries."""
    source_net = pp.create_empty_network()
    target_net = pp.create_empty_network()
    line_params = {"c_nf_per_km": 40, "r_ohm_per_km": 0.01, "x_ohm_per_km": 0.02,
                   "max_i_ka": 0.2, "additional": 8}
    pp.create_std_type(source_net, line_params, "test_copy")
    pp.copy_std_types(target_net, source_net, element="line")
    assert pp.std_type_exists(target_net, "test_copy")
def test_create_and_load_std_type_trafo():
    """Creating a trafo std type requires the full parameter set.

    Each progressively larger but still incomplete dict must raise
    UserWarning; the complete dict is stored and loads back unchanged.
    """
    net = pp.create_empty_network()
    sn_kva = 40
    vn_hv_kv = 110
    vn_lv_kv = 20
    vsc_percent = 5.
    vscr_percent = 2.
    pfe_kw=50
    i0_percent = 0.1
    shift_degree = 30
    name = "test_trafo"
    typdata = {"sn_kva": sn_kva}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_percent": vsc_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_percent": vsc_percent,
               "vscr_percent": vscr_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_percent": vsc_percent,
               "vscr_percent": vscr_percent, "pfe_kw": pfe_kw}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_percent": vsc_percent,
               "vscr_percent": vscr_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo")
    typdata = {"sn_kva": sn_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_percent": vsc_percent,
               "vscr_percent": vscr_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent,
               "shift_degree": shift_degree}
    pp.create_std_type(net, name=name, data=typdata, element="trafo")
    assert net.std_types["trafo"][name] == typdata
    loaded_type = pp.load_std_type(net, name, element="trafo")
    assert loaded_type == typdata
def test_create_and_load_std_type_trafo3w():
    """Creating a three-winding trafo std type requires the full parameter set.

    Each incomplete dict must raise UserWarning; the complete dict is stored
    and loads back unchanged.
    NOTE(review): the failing dicts mix different sn_*_kva / vsc_* keys —
    presumably irrelevant since each one is expected to raise; confirm.
    """
    net = pp.create_empty_network()
    sn_hv_kva = 40; sn_mv_kva = 20; sn_lv_kva = 20
    vn_hv_kv = 110; vn_mv_kv = 50; vn_lv_kv = 20
    vsc_hv_percent = 5.; vsc_mv_percent = 5.; vsc_lv_percent = 5.
    vscr_hv_percent = 2.; vscr_mv_percent = 2.; vscr_lv_percent = 2.
    pfe_kw=50
    i0_percent = 0.1
    shift_mv_degree = 30; shift_lv_degree = 30
    name = "test_trafo3w"
    typdata = {"sn_hv_kva": sn_hv_kva}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_mv_kva": sn_mv_kva, "vn_hv_kv": vn_hv_kv}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_lv_kva": sn_lv_kva, "vn_mv_kv": vn_mv_kv, "vn_lv_kv": vn_lv_kv}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_mv_kva": sn_mv_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_hv_percent": vsc_hv_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_hv_kva": sn_hv_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_mv_percent": vsc_mv_percent,
               "vscr_hv_percent": vscr_hv_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_hv_kva": sn_hv_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_lv_percent": vsc_lv_percent,
               "vscr_mv_percent": vscr_mv_percent, "pfe_kw": pfe_kw}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_hv_kva": sn_hv_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_hv_percent": vsc_hv_percent,
               "vscr_lv_percent": vscr_lv_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"sn_hv_kva": sn_hv_kva, "vn_hv_kv": vn_hv_kv, "vn_lv_kv": vn_lv_kv, "vsc_hv_percent": vsc_hv_percent,
               "vscr_hv_percent": vscr_hv_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent,
               "shift_mv_degree": shift_mv_degree}
    with pytest.raises(UserWarning):
        pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    typdata = {"vn_hv_kv": vn_hv_kv, "vn_mv_kv": vn_mv_kv, "vn_lv_kv": vn_lv_kv, "sn_hv_kva": sn_hv_kva,
               "sn_mv_kva": sn_mv_kva, "sn_lv_kva": sn_lv_kva, "vsc_hv_percent": vsc_hv_percent, "vsc_mv_percent": vsc_mv_percent,
               "vsc_lv_percent": vsc_lv_percent, "vscr_hv_percent": vscr_hv_percent, "vscr_mv_percent": vscr_mv_percent,
               "vscr_lv_percent": vscr_lv_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent,
               "shift_mv_degree":shift_mv_degree, "shift_lv_degree": shift_lv_degree}
    pp.create_std_type(net, name=name, data=typdata, element="trafo3w")
    assert net.std_types["trafo3w"][name] == typdata
    loaded_type = pp.load_std_type(net, name, element="trafo3w")
    assert loaded_type == typdata
def test_create_std_types_trafo():
    """create_std_types registers each entry of a dict of trafo parameter sets."""
    net = pp.create_empty_network()
    trafo_params = {"sn_kva": 40, "vn_hv_kv": 110, "vn_lv_kv": 20,
                    "vsc_percent": 5., "vscr_percent": 2., "pfe_kw": 50,
                    "i0_percent": 0.1, "shift_degree": 30}
    pp.create_std_types(net, data={"typ1": trafo_params, "typ2": trafo_params},
                        element="trafo")
    for type_name in ("typ1", "typ2"):
        assert net.std_types["trafo"][type_name] == trafo_params
def test_create_std_types_trafo3w():
    """create_std_types registers each entry of a dict of trafo3w parameter sets."""
    net = pp.create_empty_network()
    sn_hv_kva = 40; sn_mv_kva = 20; sn_lv_kva = 20
    vn_hv_kv = 110; vn_mv_kv = 50; vn_lv_kv = 20
    vsc_hv_percent = 5.; vsc_mv_percent = 5.; vsc_lv_percent = 5.
    vscr_hv_percent = 2.; vscr_mv_percent = 2.; vscr_lv_percent = 2.
    pfe_kw=50
    i0_percent = 0.1
    shift_mv_degree = 30; shift_lv_degree = 30
    # Complete parameter set for a three-winding transformer standard type.
    typdata = {"vn_hv_kv": vn_hv_kv, "vn_mv_kv": vn_mv_kv, "vn_lv_kv": vn_lv_kv, "sn_hv_kva": sn_hv_kva,
               "sn_mv_kva": sn_mv_kva, "sn_lv_kva": sn_lv_kva, "vsc_hv_percent": vsc_hv_percent, "vsc_mv_percent": vsc_mv_percent,
               "vsc_lv_percent": vsc_lv_percent, "vscr_hv_percent": vscr_hv_percent, "vscr_mv_percent": vscr_mv_percent,
               "vscr_lv_percent": vscr_lv_percent, "pfe_kw": pfe_kw, "i0_percent": i0_percent,
               "shift_mv_degree":shift_mv_degree, "shift_lv_degree": shift_lv_degree}
    typdatas = {"typ1": typdata, "typ2": typdata}
    pp.create_std_types(net, data=typdatas, element="trafo3w")
    assert net.std_types["trafo3w"]["typ1"] == typdata
    assert net.std_types["trafo3w"]["typ2"] == typdata
def test_find_line_type():
    """find_std_type_by_parameter matches exactly and within an epsilon band."""
    net = pp.create_empty_network()
    type_name = "test_line1"
    params = {"c_nf_per_km": 40000, "r_ohm_per_km": 1.5, "x_ohm_per_km": 2.0,
              "max_i_ka": 10}
    pp.create_std_type(net, data=params, name=type_name, element="line")
    # Exact parameter match.
    matches = pp.find_std_type_by_parameter(net, params)
    assert len(matches) == 1
    assert matches[0] == type_name
    # Deviation inside the epsilon tolerance still matches.
    matches = pp.find_std_type_by_parameter(net, {"r_ohm_per_km": 1.5 + 0.05},
                                            epsilon=.06)
    assert len(matches) == 1
    assert matches[0] == type_name
    # Deviation outside the tolerance does not match.
    matches = pp.find_std_type_by_parameter(net, {"r_ohm_per_km": 1.5 + 0.07},
                                            epsilon=.06)
    assert len(matches) == 0
def test_change_type_line():
    """change_std_type swaps a line's electrical parameters to another type."""
    net = pp.create_empty_network()
    r1 = 0.01
    x1 = 0.02
    c1 = 40
    i1 = 0.2
    name1 = "test_line1"
    typ1 = {"c_nf_per_km": c1, "r_ohm_per_km": r1, "x_ohm_per_km": x1, "max_i_ka": i1}
    pp.create_std_type(net, data=typ1, name=name1, element="line")
    r2 = 0.02
    x2 = 0.04
    c2 = 20
    i2 = 0.4
    name2 = "test_line2"
    typ2 = {"c_nf_per_km": c2, "r_ohm_per_km": r2, "x_ohm_per_km": x2, "max_i_ka": i2}
    pp.create_std_type(net, data=typ2, name=name2, element="line")
    b1 = pp.create_bus(net, vn_kv=0.4)
    b2 = pp.create_bus(net, vn_kv=0.4)
    # Line starts out with type 1 parameters...
    lid = pp.create_line(net, b1, b2, 1., std_type=name1)
    assert net.line.r_ohm_per_km.at[lid] == r1
    assert net.line.x_ohm_per_km.at[lid] == x1
    assert net.line.c_nf_per_km.at[lid] == c1
    assert net.line.max_i_ka.at[lid] == i1
    assert net.line.std_type.at[lid] == name1
    # ...and carries type 2 parameters after the change.
    pp.change_std_type(net, lid, name2)
    assert net.line.r_ohm_per_km.at[lid] == r2
    assert net.line.x_ohm_per_km.at[lid] == x2
    assert net.line.c_nf_per_km.at[lid] == c2
    assert net.line.max_i_ka.at[lid] == i2
    assert net.line.std_type.at[lid] == name2
def test_parameter_from_std_type_line():
    """parameter_from_std_type fills a column from std types, with a fallback.

    Lines whose std type defines the parameter get the type's value; lines
    without it (or without a std type) get the fill value; already-set
    values are left untouched.
    """
    net = pp.create_empty_network()
    r1 = 0.01
    x1 = 0.02
    c1 = 40
    i1 = 0.2
    name1 = "test_line1"
    typ1 = {"c_nf_per_km": c1, "r_ohm_per_km": r1, "x_ohm_per_km": x1, "max_i_ka": i1}
    pp.create_std_type(net, data=typ1, name=name1, element="line")
    r2 = 0.02
    x2 = 0.04
    c2 = 20
    i2 = 0.4
    endtemp2 = 40
    endtemp_fill = 20
    name2 = "test_line2"
    typ2 = {"c_nf_per_km": c2, "r_ohm_per_km": r2, "x_ohm_per_km": x2, "max_i_ka": i2,
            "endtemp_degree": endtemp2}
    pp.create_std_type(net, data=typ2, name=name2, element="line")
    b1 = pp.create_bus(net, vn_kv=0.4)
    b2 = pp.create_bus(net, vn_kv=0.4)
    lid1 = pp.create_line(net, b1, b2, 1., std_type=name1)
    lid2 = pp.create_line(net, b1, b2, 1., std_type=name2)
    lid3 = pp.create_line_from_parameters(net, b1, b2, 1., r_ohm_per_km=0.03, x_ohm_per_km=0.04,
                                          c_nf_per_km=20, max_i_ka=0.3)
    pp.parameter_from_std_type(net, "endtemp_degree", fill=endtemp_fill)
    assert net.line.endtemp_degree.at[lid1] == endtemp_fill #type1 one has not specified an endtemp
    assert net.line.endtemp_degree.at[lid2] == endtemp2 #type2 has specified endtemp
    assert net.line.endtemp_degree.at[lid3] == endtemp_fill #line3 has no standard type
    net.line.endtemp_degree.at[lid3] = 10
    pp.parameter_from_std_type(net, "endtemp_degree", fill=endtemp_fill)
    assert net.line.endtemp_degree.at[lid3] == 10 #check that existing values arent overwritten
if __name__ == "__main__":
    # net = pp.create_empty_network()
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main(["test_std_types.py"])
| 41.442006
| 125
| 0.659682
| 2,178
| 13,220
| 3.620753
| 0.069789
| 0.051737
| 0.02739
| 0.030434
| 0.890058
| 0.88562
| 0.85823
| 0.791402
| 0.780878
| 0.741187
| 0
| 0.034844
| 0.205446
| 13,220
| 318
| 126
| 41.572327
| 0.715918
| 0.026475
| 0
| 0.627451
| 0
| 0
| 0.154654
| 0
| 0
| 0
| 0
| 0
| 0.12549
| 1
| 0.039216
| false
| 0
| 0.007843
| 0
| 0.047059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3cee3718a23abecdb9711855e2ea4b4e7f4e0a4
| 46
|
py
|
Python
|
src/Set/lengthSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
src/Set/lengthSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
src/Set/lengthSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
# Demonstrate len() on a set: build six distinct integers and print the count.
x = {71, 12, 3, 18, 2, 21}
print(len(x))
| 15.333333
| 31
| 0.5
| 11
| 46
| 2.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.27027
| 0.195652
| 46
| 3
| 32
| 15.333333
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
430ec54573caadc5c6e31645edd0b588a87a55aa
| 72
|
py
|
Python
|
elliot/evaluation/metrics/bias/pop_reo/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 175
|
2021-03-04T15:46:25.000Z
|
2022-03-31T05:56:58.000Z
|
elliot/evaluation/metrics/bias/pop_reo/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 15
|
2021-03-06T17:53:56.000Z
|
2022-03-24T17:02:07.000Z
|
elliot/evaluation/metrics/bias/pop_reo/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 39
|
2021-03-04T15:46:26.000Z
|
2022-03-09T15:37:12.000Z
|
from .pop_reo import PopREO
from .extended_pop_reo import ExtendedPopREO
| 36
| 44
| 0.875
| 11
| 72
| 5.454545
| 0.636364
| 0.2
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097222
| 72
| 2
| 44
| 36
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
433227d118a5111e80dd004d3af2da62940eeb3d
| 36,677
|
py
|
Python
|
disturbance/components/proposals/email.py
|
preranaandure/disturbance
|
533b83537df6dbd7ccbb0701fd5b89d5ef11f9aa
|
[
"Apache-2.0"
] | null | null | null |
disturbance/components/proposals/email.py
|
preranaandure/disturbance
|
533b83537df6dbd7ccbb0701fd5b89d5ef11f9aa
|
[
"Apache-2.0"
] | null | null | null |
disturbance/components/proposals/email.py
|
preranaandure/disturbance
|
533b83537df6dbd7ccbb0701fd5b89d5ef11f9aa
|
[
"Apache-2.0"
] | null | null | null |
import logging
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.utils.encoding import smart_text
from django.core.urlresolvers import reverse
from django.conf import settings
from disturbance.components.emails.emails import TemplateEmailBase
from ledger.accounts.models import EmailUser
logger = logging.getLogger(__name__)
SYSTEM_NAME = settings.SYSTEM_NAME_SHORT + ' Automated Message'
def get_sender_user():
    """Return the EmailUser matching ``settings.DEFAULT_FROM_EMAIL``.

    If no matching account exists yet, create one with an empty password and
    look it up again.
    """
    sender = settings.DEFAULT_FROM_EMAIL
    try:
        sender_user = EmailUser.objects.get(email__icontains=sender)
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt propagate instead of being swallowed.
        EmailUser.objects.create(email=sender, password='')
        sender_user = EmailUser.objects.get(email__icontains=sender)
    return sender_user
class ReferralSendNotificationEmail(TemplateEmailBase):
    """Email sent when a proposal referral is sent to a referee."""
    subject = 'A referral for a proposal has been sent to you.'
    html_template = 'disturbance/emails/proposals/send_referral_notification.html'
    txt_template = 'disturbance/emails/proposals/send_referral_notification.txt'
class ReferralCompleteNotificationEmail(TemplateEmailBase):
    """Email sent when a proposal referral has been completed."""
    subject = 'A referral for a proposal has been completed.'
    html_template = 'disturbance/emails/proposals/send_referral_complete_notification.html'
    txt_template = 'disturbance/emails/proposals/send_referral_complete_notification.txt'
class ReferralRecallNotificationEmail(TemplateEmailBase):
    """Email sent when a proposal referral has been recalled."""
    subject = 'A referral for a proposal has been recalled.'
    html_template = 'disturbance/emails/proposals/send_referral_recall_notification.html'
    txt_template = 'disturbance/emails/proposals/send_referral_recall_notification.txt'
class ProposalDeclineSendNotificationEmail(TemplateEmailBase):
    """Email notifying the applicant that their proposal was declined."""
    subject = 'Your Proposal has been declined.'
    html_template = 'disturbance/emails/proposals/send_decline_notification.html'
    txt_template = 'disturbance/emails/proposals/send_decline_notification.txt'
class ProposalApprovalSendNotificationEmail(TemplateEmailBase):
    """Email notifying the applicant that their proposal was approved."""
    subject = 'Your Proposal has been approved.'
    html_template = 'disturbance/emails/proposals/send_approval_notification.html'
    txt_template = 'disturbance/emails/proposals/send_approval_notification.txt'
class AmendmentRequestSendNotificationEmail(TemplateEmailBase):
    """Email notifying the applicant that an amendment is required."""
    subject = 'An amendment to your Proposal is required.'
    html_template = 'disturbance/emails/proposals/send_amendment_notification.html'
    txt_template = 'disturbance/emails/proposals/send_amendment_notification.txt'
class SubmitSendNotificationEmail(TemplateEmailBase):
    """Internal email announcing that a new proposal was submitted."""
    subject = 'A new Proposal has been submitted.'
    html_template = 'disturbance/emails/proposals/send_submit_notification.html'
    txt_template = 'disturbance/emails/proposals/send_submit_notification.txt'
class AssessmentReminderSendNotificationEmail(TemplateEmailBase):
    """Reminder email for a proposal still awaiting assessment."""
    subject = 'A Proposal is waiting for assessment.'
    html_template = 'disturbance/emails/proposals/send_assessment_reminder_notification.html'
    txt_template = 'disturbance/emails/proposals/send_assessment_reminder_notification.txt'
class ExternalSubmitSendNotificationEmail(TemplateEmailBase):
    """External (applicant-facing) confirmation of a proposal submission."""
    subject = 'A new Proposal has been submitted.'
    html_template = 'disturbance/emails/proposals/send_external_submit_notification.html'
    txt_template = 'disturbance/emails/proposals/send_external_submit_notification.txt'
class ApproverDeclineSendNotificationEmail(TemplateEmailBase):
    """Email to the approver: a proposal was recommended for decline."""
    subject = 'A Proposal has been recommended for decline.'
    html_template = 'disturbance/emails/proposals/send_approver_decline_notification.html'
    txt_template = 'disturbance/emails/proposals/send_approver_decline_notification.txt'
class ApproverApproveSendNotificationEmail(TemplateEmailBase):
    """Email to the approver: a proposal was recommended for approval."""
    subject = 'A Proposal has been recommended for approval.'
    html_template = 'disturbance/emails/proposals/send_approver_approve_notification.html'
    txt_template = 'disturbance/emails/proposals/send_approver_approve_notification.txt'
class ApproverSendBackNotificationEmail(TemplateEmailBase):
    """Email announcing that the approver sent a proposal back."""
    subject = 'A Proposal has been sent back by approver.'
    html_template = 'disturbance/emails/proposals/send_approver_sendback_notification.html'
    txt_template = 'disturbance/emails/proposals/send_approver_sendback_notification.txt'
## Apiary Templates
class ApiaryReferralSendNotificationEmail(TemplateEmailBase):
    """Apiary variant: email sent when an application referral is sent."""
    subject = 'A referral for an application has been sent to you.'
    html_template = 'disturbance/emails/proposals/apiary_send_referral_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_referral_notification.txt'
class ApiaryReferralCompleteNotificationEmail(TemplateEmailBase):
    """Apiary variant: email sent when an application referral is completed."""
    subject = 'A referral for an application has been completed.'
    html_template = 'disturbance/emails/proposals/apiary_send_referral_complete_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_referral_complete_notification.txt'
class ApiaryReferralRecallNotificationEmail(TemplateEmailBase):
    """Apiary template email: a referral for an application has been recalled."""
    subject = 'A referral for an application has been recalled.'
    html_template = 'disturbance/emails/proposals/apiary_send_referral_recall_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_referral_recall_notification.txt'
class ApiaryProposalDeclineSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: the application has been declined."""
    subject = 'Your Application has been declined.'
    html_template = 'disturbance/emails/proposals/apiary_send_decline_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_decline_notification.txt'
class ApiaryProposalApprovalSiteTransferSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: licence issued following a site transfer."""
    #subject = 'Your Application has been approved.'
    subject = 'Your Licence has been issued.'
    html_template = 'disturbance/emails/proposals/apiary_send_approval_site_transfer_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_approval_site_transfer_notification.txt'
class ApiaryProposalApprovalSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: the application has been approved."""
    subject = 'Your Application has been approved.'
    html_template = 'disturbance/emails/proposals/apiary_send_approval_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_approval_notification.txt'
class ApiaryAmendmentRequestSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: an amendment to the application is required."""
    subject = 'An amendment to your Application is required.'
    html_template = 'disturbance/emails/proposals/apiary_send_amendment_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_amendment_notification.txt'
class ApiarySubmitSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: a new application has been submitted."""
    subject = 'A new Application has been submitted.'
    html_template = 'disturbance/emails/proposals/apiary_send_submit_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_submit_notification.txt'
class ApiaryAssessmentReminderSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: reminder that an application awaits assessment."""
    subject = 'An Application is waiting for assessment.'
    html_template = 'disturbance/emails/proposals/apiary_send_assessment_reminder_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_assessment_reminder_notification.txt'
class ApiaryExternalSubmitSendNotificationEmail(TemplateEmailBase):
    """Apiary template email confirming submission (external recipient)."""
    subject = 'A new Application has been submitted.'
    html_template = 'disturbance/emails/proposals/apiary_send_external_submit_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_external_submit_notification.txt'
class ApiaryApproverDeclineSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: an application has been recommended for decline."""
    subject = 'An Application has been recommended for decline.'
    html_template = 'disturbance/emails/proposals/apiary_send_approver_decline_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_approver_decline_notification.txt'
class ApiaryApproverApproveSendNotificationEmail(TemplateEmailBase):
    """Apiary template email: an application has been recommended for approval."""
    subject = 'An Application has been recommended for approval.'
    html_template = 'disturbance/emails/proposals/apiary_send_approver_approve_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_approver_approve_notification.txt'
class ApiaryApproverSendBackNotificationEmail(TemplateEmailBase):
    """Apiary template email: an application has been sent back by the approver."""
    subject = 'An Application has been sent back by approver.'
    html_template = 'disturbance/emails/proposals/apiary_send_approver_sendback_notification.html'
    txt_template = 'disturbance/emails/proposals/apiary_send_approver_sendback_notification.txt'
def send_referral_email_notification(referral, request, reminder=False):
    """Email the referee that a referral has been sent to them (or remind them)."""
    email = ReferralSendNotificationEmail()
    detail_url = request.build_absolute_uri(
        reverse(
            'internal-referral-detail',
            kwargs={'proposal_pk': referral.proposal.id, 'referral_pk': referral.id},
        )
    )
    msg = email.send(
        referral.referral.email,
        context={
            'proposal': referral.proposal,
            'url': detail_url,
            'reminder': reminder,
            'comments': referral.text,
        },
    )
    sender = get_sender_user()
    _log_proposal_referral_email(msg, referral, sender=sender)
    # Only log against the organisation when the proposal has one.
    if referral.proposal.applicant:
        _log_org_email(msg, referral.proposal.applicant, referral.referral, sender=sender)
def send_referral_recall_email_notification(referral, request):
    """Email the referee that their referral has been recalled."""
    email = ReferralRecallNotificationEmail()
    detail_url = request.build_absolute_uri(
        reverse(
            'internal-referral-detail',
            kwargs={'proposal_pk': referral.proposal.id, 'referral_pk': referral.id},
        )
    )
    msg = email.send(
        referral.referral.email,
        context={'proposal': referral.proposal, 'url': detail_url},
    )
    sender = get_sender_user()
    _log_proposal_referral_email(msg, referral, sender=sender)
    # Only log against the organisation when the proposal has one.
    if referral.proposal.applicant:
        _log_org_email(msg, referral.proposal.applicant, referral.referral, sender=sender)
def send_referral_complete_email_notification(referral, request):
    """Email the user who sent the referral that it has been completed."""
    email = ReferralCompleteNotificationEmail()
    proposal_url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': referral.proposal.id})
    )
    msg = email.send(
        referral.sent_by.email,
        context={
            'proposal': referral.proposal,
            'url': proposal_url,
            'referral_comments': referral.referral_text,
        },
    )
    sender = get_sender_user()
    _log_proposal_referral_email(msg, referral, sender=sender)
    # Only log against the organisation when the proposal has one.
    if referral.proposal.applicant:
        _log_org_email(msg, referral.proposal.applicant, referral.referral, sender=sender)
def send_apiary_referral_email_notification(referral, recipients, request, reminder=False):
    """Email a group of recipients that an apiary application referral was sent.

    :param referral: the referral instance being notified about.
    :param recipients: list of recipient email addresses.
    :param request: current request, used to build the absolute referral URL.
    :param reminder: when True the template renders as a reminder.
    """
    email = ApiaryReferralSendNotificationEmail()
    url = request.build_absolute_uri(
        reverse(
            'internal-referral-detail',
            kwargs={'proposal_pk': referral.proposal.id, 'referral_pk': referral.id},
        )
    )
    context = {
        'proposal': referral.proposal,
        'url': url,
        'reminder': reminder,
        'comments': referral.text,
    }
    msg = email.send(recipients, context=context)
    sender = get_sender_user()
    _log_proposal_referral_email(msg, referral, sender=sender)
    # Log against the organisation when one exists, otherwise against the submitter.
    if referral.proposal.applicant:
        _log_org_email(email_message=msg, organisation=referral.proposal.applicant, customer=None, sender=sender)
    else:
        _log_user_email(email_message=msg, emailuser=referral.proposal.submitter, customer=None, sender=sender)
## BB 20200610 this is not called at present, in line with existing DAS behaviour
#def send_apiary_referral_recall_email_notification(referral,recipients,request):
# email = ReferralRecallNotificationEmail()
# url = request.build_absolute_uri(reverse('internal-referral-detail',kwargs={'proposal_pk':referral.proposal.id,'referral_pk':referral.id}))
#
# context = {
# 'proposal': referral.proposal,
# 'url': url,
# }
#
# #msg = email.send(referral.referral.email, context=context)
# msg = email.send(recipients, context=context)
# sender = request.user if request else settings.DEFAULT_FROM_EMAIL
# _log_proposal_referral_email(msg, referral, sender=sender)
# if referral.proposal.applicant:
# _log_org_email(msg, referral.proposal.applicant, referral.apiary_referral.referral_group.members_email, sender=sender)
# elif referral.proposal.applicant_field == 'proxy_applicant':
# _log_user_email(msg, referral.proposal.proxy_applicant, referral.apiary_referral.referral_group.members_email, sender=sender)
# else:
# _log_user_email(msg, referral.proposal.submitter, referral.apiary_referral.referral_group.members_email, sender=sender)
def send_apiary_referral_complete_email_notification(referral, request, completed_by):
    """Email the user who sent an apiary referral that it has been completed.

    The completing user's email is placed in the subject line, and the email is
    logged against the proposal plus the organisation or submitter.
    """
    email = ApiaryReferralCompleteNotificationEmail()
    # Prefix the sender's email on the instance only; the class default is untouched.
    email.subject = referral.sent_by.email + ': ' + email.subject
    url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': referral.proposal.id})
    )
    context = {
        'completed_by': completed_by,
        'proposal': referral.proposal,
        'url': url,
        'referral_comments': referral.referral_text,
    }
    msg = email.send(referral.sent_by.email, context=context)
    sender = get_sender_user()
    _log_proposal_referral_email(msg, referral, sender=sender)
    # Log against the organisation when one exists, otherwise against the submitter.
    if referral.proposal.applicant:
        _log_org_email(email_message=msg, organisation=referral.proposal.applicant, customer=None, sender=sender)
    else:
        _log_user_email(email_message=msg, emailuser=referral.proposal.submitter, customer=None, sender=sender)
def send_amendment_email_notification(amendment_request, request, proposal):
    """Email the submitter that an amendment to their proposal/application is required.

    Any documents attached to the amendment request are attached to the email.
    The applicant organisation is CC'd when its email exists and differs from
    the submitter's.
    """
    if proposal.apiary_group_application_type:
        email = ApiaryAmendmentRequestSendNotificationEmail()
    else:
        email = AmendmentRequestSendNotificationEmail()
    reason = amendment_request.reason.reason
    url = request.build_absolute_uri(reverse('external-proposal-detail', kwargs={'proposal_pk': proposal.id}))
    if "-internal" in url:
        # remove '-internal'. This email is for external submitters
        url = ''.join(url.split('-internal'))
    # Iterate the related documents directly; the old truthiness test on the
    # related manager was always true and therefore redundant.
    attachments = []
    for doc in amendment_request.amendment_request_documents.all():
        attachments.append((doc.name, doc._file.file.read()))
    context = {
        'proposal': proposal,
        'reason': reason,
        'amendment_request_text': amendment_request.text,
        'url': url,
    }
    all_ccs = []
    if proposal.applicant and proposal.applicant.email and proposal.applicant.email != proposal.submitter.email:
        all_ccs = [proposal.applicant.email]
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context, attachments=attachments)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_submit_email_notification(request, proposal):
    """Notify internal assessors that a proposal/application was submitted.

    Returns the sent message so callers can log or inspect it.
    """
    email = (
        ApiarySubmitSendNotificationEmail()
        if proposal.apiary_group_application_type
        else SubmitSendNotificationEmail()
    )
    url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': proposal.id})
    )
    if "-internal" not in url:
        # add it. This email is for internal staff (assessors)
        url = '-internal.{}'.format(settings.SITE_DOMAIN).join(url.split('.' + settings.SITE_DOMAIN))
    msg = email.send(proposal.assessor_recipients, context={'proposal': proposal, 'url': url})
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    # Don't log organisation if application submitted on behalf of an individual
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
    return msg
def send_external_submit_email_notification(request, proposal):
    """Confirm to the external submitter that their proposal/application was received.

    CCs the applicant organisation when its email exists and differs from the
    submitter's. Returns the sent message.
    """
    if proposal.apiary_group_application_type:
        email = ApiaryExternalSubmitSendNotificationEmail()
    else:
        email = ExternalSubmitSendNotificationEmail()
    url = request.build_absolute_uri(reverse('external-proposal-detail', kwargs={'proposal_pk': proposal.id}))
    if "-internal" in url:
        # remove '-internal'. This email is for external submitters
        url = ''.join(url.split('-internal'))
    context = {
        'proposal': proposal,
        'submitter': proposal.submitter.get_full_name(),
        'url': url,
    }
    # The old code re-tested the applicant email's truthiness after the guard
    # had already established it; the inner check was redundant.
    all_ccs = []
    if proposal.applicant and proposal.applicant.email and proposal.applicant.email != proposal.submitter.email:
        all_ccs = [proposal.applicant.email]
    msg = email.send(proposal.submitter.email, cc=all_ccs, context=context)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    # Don't log organisation if application submitted on behalf of an individual
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
    return msg
#send email when Proposal is 'proposed to decline' by assessor.
def send_approver_decline_email_notification(reason, request, proposal):
    """Tell the approvers that an assessor has recommended declining the proposal."""
    email = (
        ApiaryApproverDeclineSendNotificationEmail()
        if proposal.apiary_group_application_type
        else ApproverDeclineSendNotificationEmail()
    )
    url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': proposal.id})
    )
    msg = email.send(
        proposal.approver_recipients,
        context={'proposal': proposal, 'reason': reason, 'url': url},
    )
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    # Only log against the organisation when the proposal has one.
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_approver_approve_email_notification(request, proposal):
    """Tell the approvers that an assessor has recommended approving the proposal."""
    email = (
        ApiaryApproverApproveSendNotificationEmail()
        if proposal.apiary_group_application_type
        else ApproverApproveSendNotificationEmail()
    )
    url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': proposal.id})
    )
    issuance = proposal.proposed_issuance_approval
    msg = email.send(
        proposal.approver_recipients,
        context={
            'start_date': issuance.get('start_date'),
            'expiry_date': issuance.get('expiry_date'),
            'details': issuance.get('details'),
            'proposal': proposal,
            'url': url,
        },
    )
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    # Only log against the organisation when the proposal has one.
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_proposal_decline_email_notification(proposal, request, proposal_decline):
    """Email the submitter that their proposal/application has been declined.

    BCCs any comma-separated cc addresses recorded on *proposal_decline*, plus
    the applicant organisation when its email exists and differs from the
    submitter's.
    """
    if proposal.apiary_group_application_type:
        email = ApiaryProposalDeclineSendNotificationEmail()
    else:
        email = ProposalDeclineSendNotificationEmail()
    context = {
        'proposal': proposal,
    }
    all_ccs = []
    cc_list = proposal_decline.cc_email
    if cc_list:
        all_ccs = cc_list.split(',')
    if proposal.applicant and proposal.applicant.email and proposal.applicant.email != proposal.submitter.email:
        all_ccs.append(proposal.applicant.email)
    msg = email.send(proposal.submitter.email, bcc=all_ccs, context=context)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_proposal_approver_sendback_email_notification(request, proposal):
    """Tell the assessors that the approver has sent the proposal back."""
    email = (
        ApiaryApproverSendBackNotificationEmail()
        if proposal.apiary_group_application_type
        else ApproverSendBackNotificationEmail()
    )
    url = request.build_absolute_uri(
        reverse('internal-proposal-detail', kwargs={'proposal_pk': proposal.id})
    )
    msg = email.send(
        proposal.assessor_recipients,
        context={
            'proposal': proposal,
            'url': url,
            'approver_comment': proposal.approver_comment,
        },
    )
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    # Only log against the organisation when the proposal has one.
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_proposal_approval_email_notification(proposal, request):
    """Email the submitter that their proposal/application has been approved.

    Attaches the licence PDF when one has been generated, BCCs the recorded
    cc addresses plus the applicant organisation (when its email exists and
    differs from the submitter's), and logs the email.
    """
    if proposal.apiary_group_application_type:
        email = ApiaryProposalApprovalSendNotificationEmail()
    else:
        email = ProposalApprovalSendNotificationEmail()
    if proposal.approval.reissued:
        email.subject = 'Your Approval has been reissued.'
    context = {
        'proposal': proposal,
    }
    all_ccs = []
    cc_list = proposal.proposed_issuance_approval['cc_email']
    if cc_list:
        all_ccs = cc_list.split(',')
    if proposal.applicant and proposal.applicant.email and proposal.applicant.email != proposal.submitter.email:
        all_ccs.append(proposal.applicant.email)
    # Attach the licence PDF when one has been generated.
    licence_document = proposal.approval.licence_document._file
    if licence_document is not None:
        file_name = proposal.approval.licence_document.name
        attachments = [(file_name, licence_document.file.read(), 'application/pdf')]
    else:
        attachments = []
    msg = email.send(proposal.submitter.email, bcc=all_ccs, attachments=attachments, context=context)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_site_transfer_approval_email_notification(proposal, request, approval):
    """Email the relevant applicant that a site-transfer licence has been issued.

    The recipient comes from approval.relevant_applicant_email (not the
    submitter). Attaches the licence PDF when one has been generated and BCCs
    the recorded cc addresses plus the applicant organisation where applicable.
    """
    email = ApiaryProposalApprovalSiteTransferSendNotificationEmail()
    if approval.reissued:
        email.subject = 'Your Licence has been reissued.'
    context = {
        'approval': approval,
        'proposal': proposal,
    }
    all_ccs = []
    cc_list = proposal.proposed_issuance_approval['cc_email']
    if cc_list:
        all_ccs = cc_list.split(',')
    if proposal.applicant and proposal.applicant.email and proposal.applicant.email != proposal.submitter.email:
        all_ccs.append(proposal.applicant.email)
    # Attach the licence PDF when one has been generated.
    licence_document = approval.licence_document._file
    if licence_document is not None:
        file_name = approval.licence_document.name
        attachments = [(file_name, licence_document.file.read(), 'application/pdf')]
    else:
        attachments = []
    msg = email.send(approval.relevant_applicant_email, bcc=all_ccs, attachments=attachments, context=context)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
def send_assessment_reminder_email_notification(proposal):
    """Remind the assessors that a proposal/application still awaits assessment.

    No request object is available here, so the URL is built from
    settings.SITE_URL instead of request.build_absolute_uri. Returns the sent
    message.
    """
    if proposal.apiary_group_application_type:
        email = ApiaryAssessmentReminderSendNotificationEmail()
    else:
        email = AssessmentReminderSendNotificationEmail()
    url = (settings.SITE_URL or '') + reverse('internal-proposal-detail', kwargs={'proposal_pk': proposal.id})
    if "-internal" not in url:
        # add it. This email is for internal staff (assessors)
        url = '-internal.{}'.format(settings.SITE_DOMAIN).join(url.split('.' + settings.SITE_DOMAIN))
    context = {
        'proposal': proposal,
        'url': url,
    }
    msg = email.send(proposal.assessor_recipients, context=context)
    sender = get_sender_user()
    _log_proposal_email(msg, proposal, sender=sender)
    if proposal.applicant:
        _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender)
    return msg
def _log_proposal_referral_email(email_message, referral, sender=None):
    """Persist a ProposalLogEntry recording an email sent for *referral*.

    Accepts either a Django EmailMessage/EmailMultiAlternatives (fields are
    pulled from the message) or any other object (stringified as the body).
    Returns the created log entry.
    """
    from disturbance.components.proposals.models import ProposalLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # cc and bcc are logged together in the single cc field, comma separated
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        text = smart_text(email_message)
        subject = ''
        # Bug fix: proposal.applicant can be None (individual submitters — see
        # the `if referral.proposal.applicant:` guards in the senders above);
        # the old code dereferenced .email unconditionally and could raise
        # AttributeError here.
        applicant = referral.proposal.applicant
        to = applicant.email if applicant and applicant.email else ''
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    kwargs = {
        'subject': subject,
        'text': text,
        'proposal': referral.proposal,
        'customer': referral.referral,
        'staff': sender,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }
    email_entry = ProposalLogEntry.objects.create(**kwargs)
    return email_entry
def _log_proposal_email(email_message, proposal, sender=None):
    """Persist a ProposalLogEntry recording an email sent for *proposal*.

    Accepts either a Django EmailMessage/EmailMultiAlternatives (fields are
    pulled from the message) or any other object (stringified as the body).
    Returns the created log entry.
    """
    from disturbance.components.proposals.models import ProposalLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        recipients = email_message.to
        # the to email is normally a list
        to = ','.join(recipients) if isinstance(recipients, list) else smart_text(recipients)
        # cc and bcc share the single cc column of the log entry, comma separated
        all_ccs = ','.join(list(email_message.cc or []) + list(email_message.bcc or []))
    else:
        text = smart_text(email_message)
        subject = ''
        to = proposal.submitter.email
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    email_entry = ProposalLogEntry.objects.create(
        subject=subject,
        text=text,
        proposal=proposal,
        customer=proposal.submitter,
        staff=sender,
        to=to,
        fromm=fromm,
        cc=all_ccs,
    )
    return email_entry
def _log_org_email(email_message, organisation, customer, sender=None):
    """Persist an OrganisationLogEntry recording an email sent for *organisation*.

    Accepts either a Django EmailMessage/EmailMultiAlternatives (fields are
    pulled from the message) or any other object (stringified as the body).
    Returns the created log entry.
    """
    from disturbance.components.organisations.models import OrganisationLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        recipients = email_message.to
        # the to email is normally a list
        to = ','.join(recipients) if isinstance(recipients, list) else smart_text(recipients)
        # cc and bcc share the single cc column of the log entry, comma separated
        all_ccs = ','.join(list(email_message.cc or []) + list(email_message.bcc or []))
    else:
        text = smart_text(email_message)
        subject = ''
        to = customer
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    email_entry = OrganisationLogEntry.objects.create(
        subject=subject,
        text=text,
        organisation=organisation,
        customer=customer,
        staff=sender,
        to=to,
        fromm=fromm,
        cc=all_ccs,
    )
    return email_entry
def _log_user_email(email_message, emailuser, customer, sender=None):
    """Persist an EmailUserLogEntry recording an email sent for *emailuser*.

    Accepts either a Django EmailMessage/EmailMultiAlternatives (fields are
    pulled from the message) or any other object (stringified as the body).
    Returns the created log entry.
    """
    from ledger.accounts.models import EmailUserLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        recipients = email_message.to
        # the to email is normally a list
        to = ','.join(recipients) if isinstance(recipients, list) else smart_text(recipients)
        # cc and bcc share the single cc column of the log entry, comma separated
        all_ccs = ','.join(list(email_message.cc or []) + list(email_message.bcc or []))
    else:
        text = smart_text(email_message)
        subject = ''
        to = customer
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    email_entry = EmailUserLogEntry.objects.create(
        subject=subject,
        text=text,
        emailuser=emailuser,
        customer=customer,
        staff=sender,
        to=to,
        fromm=fromm,
        cc=all_ccs,
    )
    return email_entry
#def _log_org_email(email_message, organisation, customer ,sender=None):
# from disturbance.components.organisations.models import OrganisationLogEntry
# if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
# # TODO this will log the plain text body, should we log the html instead
# text = email_message.body
# subject = email_message.subject
# fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
# # the to email is normally a list
# if isinstance(email_message.to, list):
# to = ','.join(email_message.to)
# else:
# to = smart_text(email_message.to)
# # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
# all_ccs = []
# if email_message.cc:
# all_ccs += list(email_message.cc)
# if email_message.bcc:
# all_ccs += list(email_message.bcc)
# all_ccs = ','.join(all_ccs)
#
# else:
# text = smart_text(email_message)
# subject = ''
# to = customer
# fromm = smart_text(sender) if sender else SYSTEM_NAME
# all_ccs = ''
#
# customer = customer
#
# staff = sender
#
# kwargs = {
# 'subject': subject,
# 'text': text,
# 'organisation': organisation,
# 'customer': customer,
# 'staff': staff,
# 'to': to,
# 'fromm': fromm,
# 'cc': all_ccs
# }
#
# email_entry = OrganisationLogEntry.objects.create(**kwargs)
#
# return email_entry
#
#def _log_user_email(email_message, emailuser, referral_group_email_list, sender=None):
# from ledger.accounts.models import EmailUserLogEntry
# if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
# # TODO this will log the plain text body, should we log the html instead
# text = email_message.body
# subject = email_message.subject
# fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
# # the to email is normally a list
# if isinstance(email_message.to, list):
# to = ','.join(email_message.to)
# else:
# to = smart_text(email_message.to)
# # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
# all_ccs = []
# if email_message.cc:
# all_ccs += list(email_message.cc)
# if email_message.bcc:
# all_ccs += list(email_message.bcc)
# all_ccs = ','.join(all_ccs)
#
# else:
# text = smart_text(email_message)
# subject = ''
# to = customer
# fromm = smart_text(sender) if sender else SYSTEM_NAME
# all_ccs = ''
#
# for customer in referral_group_email_list:
# customer_email_user = EmailUser.objects.get(email=customer)
#
# staff = sender
#
# kwargs = {
# 'subject': subject,
# 'text': text,
# #'emailuser': emailuser,
# 'emailuser': customer_email_user,
# 'customer': customer_email_user,
# 'staff': staff,
# 'to': to,
# 'fromm': fromm,
# 'cc': all_ccs
# }
#
# email_entry = EmailUserLogEntry.objects.create(**kwargs)
# # TODO - fix return statement
# return email_entry
#
| 42.846963
| 144
| 0.722196
| 4,127
| 36,677
| 6.193118
| 0.057427
| 0.038499
| 0.048906
| 0.066513
| 0.842404
| 0.815251
| 0.791932
| 0.779138
| 0.742478
| 0.689581
| 0
| 0.000268
| 0.185239
| 36,677
| 855
| 145
| 42.897076
| 0.855035
| 0.237015
| 0
| 0.605839
| 0
| 0
| 0.20298
| 0.133988
| 0
| 0
| 0
| 0.00117
| 0
| 1
| 0.036496
| false
| 0.001825
| 0.020073
| 0
| 0.25365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a47816ce1e78b0a28f7ac92b298f277db8335f0
| 11,334
|
py
|
Python
|
tests/unit/utils/test_cache.py
|
flowluap/OpenSlides
|
e0069f734adacd5a42183915230f17fc52336f22
|
[
"MIT"
] | null | null | null |
tests/unit/utils/test_cache.py
|
flowluap/OpenSlides
|
e0069f734adacd5a42183915230f17fc52336f22
|
[
"MIT"
] | null | null | null |
tests/unit/utils/test_cache.py
|
flowluap/OpenSlides
|
e0069f734adacd5a42183915230f17fc52336f22
|
[
"MIT"
] | null | null | null |
import json
from typing import Any, Dict, List
import pytest
from openslides.utils.cache import ChangeIdTooLowError, ElementCache
from .cache_provider import TTestCacheProvider, example_data, get_cachable_provider
def decode_dict(encoded_dict: Dict[str, str]) -> Dict[str, Any]:
    """
    Helper function returning a copy of *encoded_dict* with every value
    JSON-decoded.
    """
    decoded = {}
    for key, raw_value in encoded_dict.items():
        decoded[key] = json.loads(raw_value)
    return decoded
def sort_dict(
    encoded_dict: Dict[str, List[Dict[str, Any]]]
) -> Dict[str, List[Dict[str, Any]]]:
    """
    Helper function returning *encoded_dict* with each list value sorted by
    the elements' "id" key.
    """
    sorted_dict = {}
    for key, elements in encoded_dict.items():
        sorted_dict[key] = sorted(elements, key=lambda element: element["id"])
    return sorted_dict
@pytest.fixture
def element_cache():
    """Fixture: an ElementCache backed by the in-memory test cache provider,
    starting at change id 0 with the cache ensured."""
    element_cache = ElementCache(
        cache_provider_class=TTestCacheProvider,
        cachable_provider=get_cachable_provider(),
        default_change_id=0,
    )
    element_cache.ensure_cache()
    return element_cache
@pytest.mark.asyncio
async def test_change_elements(element_cache):
    """change_elements merges updates/creations into the full data, drops
    deleted (None) elements, and records all touched keys under the first
    change id (1)."""
    input_data = {
        "app/collection1:1": {"id": 1, "value": "updated"},
        "app/collection1:2": {"id": 2, "value": "new"},
        "app/collection2:1": {"id": 1, "key": "updated"},
        "app/collection2:2": None,  # Deleted
    }
    element_cache.cache_provider.full_data = {
        "app/collection1:1": '{"id": 1, "value": "old"}',
        "app/collection2:1": '{"id": 1, "key": "old"}',
        "app/collection2:2": '{"id": 2, "key": "old"}',
    }
    result = await element_cache.change_elements(input_data)
    assert result == 1  # first change_id
    assert decode_dict(element_cache.cache_provider.full_data) == decode_dict(
        {
            "app/collection1:1": '{"id": 1, "value": "updated"}',
            "app/collection1:2": '{"id": 2, "value": "new"}',
            "app/collection2:1": '{"id": 1, "key": "updated"}',
        }
    )
    assert element_cache.cache_provider.change_id_data == {
        1: {
            "app/collection1:1",
            "app/collection1:2",
            "app/collection2:1",
            "app/collection2:2",
        }
    }
@pytest.mark.asyncio
async def test_change_elements_with_no_data_in_redis(element_cache):
    """With an empty cache, change_elements first fills the full data from the
    cachable provider, then applies the changes and records them under the
    first change id (1)."""
    input_data = {
        "app/collection1:1": {"id": 1, "value": "updated"},
        "app/collection1:2": {"id": 2, "value": "new"},
        "app/collection2:1": {"id": 1, "key": "updated"},
        "app/collection2:2": None,
        "app/personalized-collection:2": None,
    }
    result = await element_cache.change_elements(input_data)
    assert result == 1  # first change_id
    assert decode_dict(element_cache.cache_provider.full_data) == decode_dict(
        {
            "app/collection1:1": '{"id": 1, "value": "updated"}',
            "app/collection1:2": '{"id": 2, "value": "new"}',
            "app/collection2:1": '{"id": 1, "key": "updated"}',
            "app/personalized-collection:1": '{"id": 1, "key": "value1", "user_id": 1}',
        }
    )
    assert element_cache.cache_provider.change_id_data == {
        1: {
            "app/collection1:1",
            "app/collection1:2",
            "app/collection2:1",
            "app/collection2:2",
            "app/personalized-collection:2",
        }
    }
@pytest.mark.asyncio
async def test_get_all_data_from_db(element_cache):
    """get_all_data_list falls back to the cachable provider (the example
    data) when the cache is empty, and writes the elements back to the cache
    provider as JSON strings."""
    result = await element_cache.get_all_data_list()
    assert result == example_data()
    # Test that elements are written to redis
    assert decode_dict(element_cache.cache_provider.full_data) == decode_dict(
        {
            "app/collection1:1": '{"id": 1, "value": "value1"}',
            "app/collection1:2": '{"id": 2, "value": "value2"}',
            "app/collection2:1": '{"id": 1, "key": "value1"}',
            "app/collection2:2": '{"id": 2, "key": "value2"}',
            "app/personalized-collection:1": '{"id": 1, "key": "value1", "user_id": 1}',
            "app/personalized-collection:2": '{"id": 2, "key": "value2", "user_id": 2}',
        }
    )
@pytest.mark.asyncio
async def test_get_all_data_from_redis(element_cache):
element_cache.cache_provider.full_data = {
"app/collection1:1": '{"id": 1, "value": "value1"}',
"app/collection1:2": '{"id": 2, "value": "value2"}',
"app/collection2:1": '{"id": 1, "key": "value1"}',
"app/collection2:2": '{"id": 2, "key": "value2"}',
"app/personalized-collection:1": '{"id": 1, "key": "value1", "user_id": 1}',
"app/personalized-collection:2": '{"id": 2, "key": "value2", "user_id": 2}',
}
result = await element_cache.get_all_data_list()
# The output from redis has to be the same then the db_data
assert sort_dict(result) == sort_dict(example_data())
@pytest.mark.asyncio
async def test_get_data_since_change_id_0(element_cache):
element_cache.cache_provider.full_data = {
"app/collection1:1": '{"id": 1, "value": "value1"}',
"app/collection1:2": '{"id": 2, "value": "value2"}',
"app/collection2:1": '{"id": 1, "key": "value1"}',
"app/collection2:2": '{"id": 2, "key": "value2"}',
"app/personalized-collection:1": '{"id": 1, "key": "value1", "user_id": 1}',
"app/personalized-collection:2": '{"id": 2, "key": "value2", "user_id": 2}',
}
result = await element_cache.get_data_since(None, 0)
assert sort_dict(result[0]) == sort_dict(example_data())
@pytest.mark.asyncio
async def test_get_data_since_change_id_lower_than_in_redis(element_cache):
element_cache.cache_provider.full_data = {
"app/collection1:1": '{"id": 1, "value": "value1"}',
"app/collection1:2": '{"id": 2, "value": "value2"}',
"app/collection2:1": '{"id": 1, "key": "value1"}',
"app/collection2:2": '{"id": 2, "key": "value2"}',
}
element_cache.cache_provider.default_change_id = 2
element_cache.cache_provider.change_id_data = {2: {"app/collection1:1"}}
with pytest.raises(ChangeIdTooLowError):
await element_cache.get_data_since(None, 1)
@pytest.mark.asyncio
async def test_get_data_since_change_id_data_in_redis(element_cache):
element_cache.cache_provider.full_data = {
"app/collection1:1": '{"id": 1, "value": "value1"}',
"app/collection1:2": '{"id": 2, "value": "value2"}',
"app/collection2:1": '{"id": 1, "key": "value1"}',
"app/collection2:2": '{"id": 2, "key": "value2"}',
}
element_cache.cache_provider.change_id_data = {
1: {"app/collection1:1", "app/collection1:3"}
}
result = await element_cache.get_data_since(None, 1)
assert result == (
{"app/collection1": [{"id": 1, "value": "value1"}]},
["app/collection1:3"],
)
@pytest.mark.asyncio
async def test_get_data_since_change_id_data_in_db(element_cache):
element_cache.cache_provider.change_id_data = {
1: {"app/collection1:1", "app/collection1:3"}
}
result = await element_cache.get_data_since(None, 1)
assert result == (
{"app/collection1": [{"id": 1, "value": "value1"}]},
["app/collection1:3"],
)
@pytest.mark.asyncio
async def test_get_gata_since_change_id_data_in_db_empty_change_id(element_cache):
result = await element_cache.get_data_since(None, 1)
assert result == ({}, [])
@pytest.mark.asyncio
async def test_get_element_data_empty_redis(element_cache):
result = await element_cache.get_element_data("app/collection1", 1)
assert result == {"id": 1, "value": "value1"}
@pytest.mark.asyncio
async def test_get_element_data_empty_redis_does_not_exist(element_cache):
result = await element_cache.get_element_data("app/collection1", 3)
assert result is None
@pytest.mark.asyncio
async def test_get_element_data_full_redis(element_cache):
element_cache.cache_provider.full_data = {
"app/collection1:1": '{"id": 1, "value": "value1"}',
"app/collection1:2": '{"id": 2, "value": "value2"}',
"app/collection2:1": '{"id": 1, "key": "value1"}',
"app/collection2:2": '{"id": 2, "key": "value2"}',
}
result = await element_cache.get_element_data("app/collection1", 1)
assert result == {"id": 1, "value": "value1"}
@pytest.mark.asyncio
async def test_get_all_restricted_data(element_cache):
result = await element_cache.get_all_data_list(1)
# The output from redis has to be the same then the db_data
assert sort_dict(result) == sort_dict(
{
"app/collection1": [
{"id": 1, "value": "restricted_value1"},
{"id": 2, "value": "restricted_value2"},
],
"app/collection2": [
{"id": 1, "key": "restricted_value1"},
{"id": 2, "key": "restricted_value2"},
],
"app/personalized-collection": [{"id": 1, "key": "value1", "user_id": 1}],
}
)
@pytest.mark.asyncio
async def test_get_restricted_data_change_id_0(element_cache):
result = await element_cache.get_data_since(2, 0)
assert sort_dict(result[0]) == sort_dict(
{
"app/collection1": [
{"id": 1, "value": "restricted_value1"},
{"id": 2, "value": "restricted_value2"},
],
"app/collection2": [
{"id": 1, "key": "restricted_value1"},
{"id": 2, "key": "restricted_value2"},
],
"app/personalized-collection": [{"id": 2, "key": "value2", "user_id": 2}],
}
)
@pytest.mark.asyncio
async def test_get_restricted_data_2(element_cache):
element_cache.cache_provider.change_id_data = {
1: {"app/collection1:1", "app/collection1:3"}
}
result = await element_cache.get_data_since(0, 1)
assert result == (
{"app/collection1": [{"id": 1, "value": "restricted_value1"}]},
["app/collection1:3"],
)
@pytest.mark.asyncio
async def test_get_restricted_data_from_personalized_cacheable(element_cache):
element_cache.cache_provider.change_id_data = {1: {"app/personalized-collection:2"}}
result = await element_cache.get_data_since(0, 1)
assert result == ({}, [])
@pytest.mark.asyncio
async def test_get_restricted_data_change_id_lower_than_in_redis(element_cache):
element_cache.cache_provider.default_change_id = 2
with pytest.raises(ChangeIdTooLowError):
await element_cache.get_data_since(0, 1)
@pytest.mark.asyncio
async def test_get_restricted_data_change_with_id(element_cache):
element_cache.cache_provider.change_id_data = {2: {"app/collection1:1"}}
result = await element_cache.get_data_since(0, 2)
assert result == (
{"app/collection1": [{"id": 1, "value": "restricted_value1"}]},
[],
)
@pytest.mark.asyncio
async def test_lowest_change_id_after_updating_lowest_element(element_cache):
await element_cache.change_elements(
{"app/collection1:1": {"id": 1, "value": "updated1"}}
)
first_lowest_change_id = await element_cache.get_lowest_change_id()
# Alter same element again
await element_cache.change_elements(
{"app/collection1:1": {"id": 1, "value": "updated2"}}
)
second_lowest_change_id = await element_cache.get_lowest_change_id()
assert first_lowest_change_id == 0
assert second_lowest_change_id == 0 # The lowest_change_id should not change
| 33.532544
| 88
| 0.618405
| 1,424
| 11,334
| 4.673455
| 0.077949
| 0.115402
| 0.016829
| 0.06281
| 0.872427
| 0.853644
| 0.830654
| 0.815177
| 0.780316
| 0.746657
| 0
| 0.034367
| 0.214399
| 11,334
| 337
| 89
| 33.632047
| 0.71305
| 0.031939
| 0
| 0.58
| 0
| 0
| 0.287909
| 0.031462
| 0
| 0
| 0
| 0
| 0.092
| 1
| 0.012
| false
| 0
| 0.02
| 0
| 0.044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a5b920087ea7dda4c9be14ec1449dd543d12274
| 16,230
|
py
|
Python
|
tests/TestReconciliationManager.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
tests/TestReconciliationManager.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
tests/TestReconciliationManager.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
"""
Test class for testing ReconciliationManager.
"""
import os
from collections import defaultdict
from unittest.mock import MagicMock
from ReconciliationManager import ReconciliationManager
from .TestLogger import Logger
from .TestBase import TestBase
CWD = os.path.abspath(os.path.dirname(__file__))
TEST_DATAPATH = os.path.join(CWD, 'test_data')
logger = Logger('testing_logger')
class TestReconciliationManager(TestBase):
def __init__(self, *args, **kwargs):
super(TestReconciliationManager, self).__init__(*args, **kwargs)
self.logger = logger
def get_test_data(self, fpaths):
test_data = []
for fpath in fpaths:
fname, f_ext = os.path.splitext(fpath)
if f_ext.lower() == '.yml':
test_data.append(self.read_yaml(fpath))
elif f_ext.lower() == '.json':
test_data.append(self.read_json(fpath))
else:
raise ValueError("File format not supported! Double check the file ext.")
return test_data
@staticmethod
def get_dummy_mapped_snow_cust_to_monit_org():
dummy_mapped_snow_cust_to_monit_org = defaultdict(list)
dummy_mapped_snow_cust_to_monit_org['03b82e935c1f4dd9a9be0a2c21cb97d4'] = {
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d4',
'company': 'Pearl Lighting',
'address': '',
'city': 'Sydney',
'state': 'NSW',
'zip': '',
'country': 'Australia',
'latitude': '',
'longitude': '',
}
dummy_mapped_snow_cust_to_monit_org['e806eb2f66ad41babd790d7d254fab64'] = {
'crm_id': 'e806eb2f66ad41babd790d7d254fab64',
'company': 'Fortune Lighting',
'address': '',
'city': 'Rowville',
'state': 'VIC',
'zip': '',
'country': 'USA',
'latitude': '',
'longitude': '',
}
return dummy_mapped_snow_cust_to_monit_org
@staticmethod
def dummy_get_snow_cust_data_for_monit_org_update(monit_org_record, snow_cust_record):
result = {
'company': 'Pearl Lighting',
'city': 'Sydney',
'state': 'NSW',
'country': 'Australia',
'uri': '/api/organization/53',
}
return result
def get_reconciliation_manager(self, snow_cust_fname, monit_orgs_fname):
snow_cust_fpath = os.path.join(TEST_DATAPATH, snow_cust_fname)
monit_orgs_fpath = os.path.join(TEST_DATAPATH, monit_orgs_fname)
fpaths = [snow_cust_fpath, monit_orgs_fpath]
test_data = self.get_test_data(fpaths)
reconciliation_manager = ReconciliationManager(
self.logger, test_data[0], test_data[1]
)
return reconciliation_manager
###################################
# TEST PROPER #
###################################
def test_get_mapped_snow_cust_to_monit_org(self):
self.logger.info("Executing test for _get_mapped_snow_cust_to_monit_org...")
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-reduced.json'
)
# Expectations setup
expected_mapped_snow_cust_to_monit_org = (
self.get_dummy_mapped_snow_cust_to_monit_org()
)
# Calling the method to test
actual_mapped_snow_cust_to_monit_org = base._get_mapped_snow_cust_to_monit_org()
# Assertions
assert (
actual_mapped_snow_cust_to_monit_org == expected_mapped_snow_cust_to_monit_org
)
assert len(actual_mapped_snow_cust_to_monit_org) == len(
expected_mapped_snow_cust_to_monit_org
)
def test_check_monit_org_for_delete(self):
self.logger.info(
"Executing test for _check_monit_org_for_update_or_delete : DELETE..."
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-reduced.json'
)
dummy_mapped_snow_cust_to_monit_org = (
self.get_dummy_mapped_snow_cust_to_monit_org()
)
dummy_monit_org_record = {
'uri': '/api/organization/55',
'details': {
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d1',
'city': 'Melbourne',
'zip': '',
'state': 'VIC',
'latitude': '',
'company': 'Smartechnologies',
'address': '',
'country': 'AU',
'longitude': '',
},
}
# Expectations setup
expected_tasks_for_update = []
expected_tasks_for_delete = ['/api/organization/55']
expected_tasks_for_create = []
# Calling the method to test
base._check_monit_org_for_update_or_delete(
dummy_mapped_snow_cust_to_monit_org, dummy_monit_org_record
)
# Assertions
assert base.tasks['create'] == expected_tasks_for_create
assert base.tasks['update'] == expected_tasks_for_update
assert base.tasks['delete'] == expected_tasks_for_delete
def test_check_monit_org_for_update(self):
self.logger.info(
"Executing test for _check_monit_org_for_update_or_delete : UPDATE..."
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-reduced.json'
)
base._get_snow_cust_data_for_monit_org_update = MagicMock()
base._get_snow_cust_data_for_monit_org_update.side_effect = (
self.dummy_get_snow_cust_data_for_monit_org_update
)
dummy_mapped_snow_cust_to_monit_org = (
self.get_dummy_mapped_snow_cust_to_monit_org()
)
dummy_monit_org_record = {
'uri': '/api/organization/53',
'details': {
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d4',
'city': 'Melbourne',
'zip': '',
'state': 'VIC',
'latitude': '',
'company': 'Smartechnologies',
'address': '',
'country': 'AU',
'longitude': '',
},
}
# Expectations setup
expected_tasks_for_update = [
{
'company': 'Pearl Lighting',
'city': 'Sydney',
'state': 'NSW',
'country': 'Australia',
'uri': '/api/organization/53',
}
]
expected_tasks_for_delete = []
expected_tasks_for_create = []
# Calling the method to test
base._check_monit_org_for_update_or_delete(
dummy_mapped_snow_cust_to_monit_org, dummy_monit_org_record
)
# Assertions
assert base.tasks['create'] == expected_tasks_for_create
assert base.tasks['update'] == expected_tasks_for_update
assert base.tasks['delete'] == expected_tasks_for_delete
base._get_snow_cust_data_for_monit_org_update.assert_called_once()
def test_get_snow_cust_data_for_monit_org_update(self):
self.logger.info("Executing test for _get_snow_cust_data_for_monit_org_update...")
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-reduced.json'
)
dummy_mapped_snow_cust_to_monit_org_record = {
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d4',
'company': 'Pearl Lighting',
'address': '',
'city': 'Sydney',
'state': 'NSW',
'zip': '',
'country': 'Australia',
'latitude': '',
'longitude': '',
}
dummy_monit_org_record = {
'uri': '/api/organization/53',
'details': {
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d4',
'city': 'Melbourne',
'zip': '',
'state': 'VIC',
'latitude': '',
'company': 'Smartechnologies',
'address': '',
'country': 'AU',
'longitude': '',
},
}
# Expectations setup
expected_snow_cust_data_for_update = {
'company': 'Pearl Lighting',
'city': 'Sydney',
'state': 'NSW',
'country': 'Australia',
'uri': '/api/organization/53',
}
# Calling the method to test
actual_snow_cust_data_for_update = base._get_snow_cust_data_for_monit_org_update(
dummy_monit_org_record, dummy_mapped_snow_cust_to_monit_org_record
)
# Assertions
assert actual_snow_cust_data_for_update == expected_snow_cust_data_for_update
def test_prepare_reconciliation_tasks_empty_snow_cust_data(self):
self.logger.info(
"Executing test for prepare_reconciliation_tasks but with empty snow customer data..." # NOQA
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-empty.json', 'monitoring-orgs-reduced.json'
)
# Expectations setup
expected_tasks_for_update = []
expected_tasks_for_delete = [
'/api/organization/53',
'/api/organization/143',
]
expected_tasks_for_create = []
# Calling the method to test
actual_prepared_tasks = base.prepare_reconciliation_tasks()
assert actual_prepared_tasks['create'] == expected_tasks_for_create
assert actual_prepared_tasks['update'] == expected_tasks_for_update
assert actual_prepared_tasks['delete'] == expected_tasks_for_delete
def test_prepare_reconciliation_tasks_empty_monit_org_data(self):
self.logger.info(
"Executing test for prepare_reconciliation_tasks but with empty monitoring organization data..." # NOQA
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-empty.json'
)
# Expectations setup
expected_tasks_for_update = []
expected_tasks_for_delete = []
expected_tasks_for_create = [
{
'crm_id': '03b82e935c1f4dd9a9be0a2c21cb97d4',
'company': 'Pearl Lighting',
'address': '',
'city': 'Sydney',
'state': 'NSW',
'zip': '',
'country': 'Australia',
'latitude': '',
'longitude': '',
},
{
'crm_id': 'e806eb2f66ad41babd790d7d254fab64',
'company': 'Fortune Lighting',
'address': '',
'city': 'Rowville',
'state': 'VIC',
'zip': '',
'country': 'USA',
'latitude': '',
'longitude': '',
},
]
# Calling the method to test
actual_prepared_tasks = base.prepare_reconciliation_tasks()
assert actual_prepared_tasks['create'] == expected_tasks_for_create
assert actual_prepared_tasks['update'] == expected_tasks_for_update
assert actual_prepared_tasks['delete'] == expected_tasks_for_delete
def test_prepare_reconciliation_tasks_json_input_files(self):
self.logger.info(
"Executing test for prepare_reconciliation_tasks_json_input_files..."
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.json', 'monitoring-orgs-reduced.json'
)
base._get_mapped_snow_cust_to_monit_org = MagicMock()
base._get_mapped_snow_cust_to_monit_org.side_effect = (
self.get_dummy_mapped_snow_cust_to_monit_org
)
base._get_snow_cust_data_for_monit_org_update = MagicMock()
base._get_snow_cust_data_for_monit_org_update.side_effect = (
self.dummy_get_snow_cust_data_for_monit_org_update
)
# Expectations setup
expected_tasks_for_update = [
{
'company': 'Pearl Lighting',
'city': 'Sydney',
'state': 'NSW',
'country': 'Australia',
'uri': '/api/organization/53',
}
]
expected_tasks_for_delete = ['/api/organization/143']
expected_tasks_for_create = [
{
'crm_id': 'e806eb2f66ad41babd790d7d254fab64',
'company': 'Fortune Lighting',
'address': '',
'city': 'Rowville',
'state': 'VIC',
'zip': '',
'country': 'USA',
'latitude': '',
'longitude': '',
}
]
# Calling the method to test
actual_prepared_tasks = base.prepare_reconciliation_tasks()
# Assertions
assert actual_prepared_tasks['create'] == expected_tasks_for_create
assert actual_prepared_tasks['update'] == expected_tasks_for_update
assert actual_prepared_tasks['delete'] == expected_tasks_for_delete
base._get_mapped_snow_cust_to_monit_org.assert_called_once()
base._get_snow_cust_data_for_monit_org_update.assert_called_once()
self.write_json('output_JSON_from_JSON_inputs', actual_prepared_tasks)
def test_prepare_reconciliation_tasks_yaml_input_files(self):
self.logger.info(
"Executing test for prepare_reconciliation_tasks_yaml_input_files..."
)
# Setting up
base = self.get_reconciliation_manager(
'snow-customers-reduced.yml', 'monitoring-orgs-reduced.yml'
)
base._get_mapped_snow_cust_to_monit_org = MagicMock()
base._get_mapped_snow_cust_to_monit_org.side_effect = (
self.get_dummy_mapped_snow_cust_to_monit_org
)
base._get_snow_cust_data_for_monit_org_update = MagicMock()
base._get_snow_cust_data_for_monit_org_update.side_effect = (
self.dummy_get_snow_cust_data_for_monit_org_update
)
# Expectations setup
expected_tasks_for_update = [
{
'company': 'Pearl Lighting',
'city': 'Sydney',
'state': 'NSW',
'country': 'Australia',
'uri': '/api/organization/53',
}
]
expected_tasks_for_delete = ['/api/organization/143']
expected_tasks_for_create = [
{
'crm_id': 'e806eb2f66ad41babd790d7d254fab64',
'company': 'Fortune Lighting',
'address': '',
'city': 'Rowville',
'state': 'VIC',
'zip': '',
'country': 'USA',
'latitude': '',
'longitude': '',
}
]
# Calling the method to test
actual_prepared_tasks = base.prepare_reconciliation_tasks()
# Assertions
assert actual_prepared_tasks['create'] == expected_tasks_for_create
assert actual_prepared_tasks['update'] == expected_tasks_for_update
assert actual_prepared_tasks['delete'] == expected_tasks_for_delete
base._get_mapped_snow_cust_to_monit_org.assert_called_once()
base._get_snow_cust_data_for_monit_org_update.assert_called_once()
# Produce ouput JSON file
self.write_json('output_JSON_from_YAML_inputs', actual_prepared_tasks)
def test_prepare_reconciliation_tasks(self):
self.logger.info("Executing test for prepare_reconciliation_tasks...")
# Setting up
base = self.get_reconciliation_manager(
'snow-customers.json', 'monitoring-orgs.json'
)
# Calling the method to test
actual_prepared_tasks = base.prepare_reconciliation_tasks()
# Produce ouput JSON file
self.write_json('output_JSON', actual_prepared_tasks)
# Print output JSON
self.print_json(actual_prepared_tasks)
| 35.592105
| 116
| 0.585952
| 1,618
| 16,230
| 5.429543
| 0.090853
| 0.055549
| 0.065566
| 0.05646
| 0.850541
| 0.838702
| 0.802049
| 0.766648
| 0.73136
| 0.686625
| 0
| 0.021673
| 0.312015
| 16,230
| 455
| 117
| 35.67033
| 0.76509
| 0.044054
| 0
| 0.595506
| 0
| 0
| 0.206637
| 0.082733
| 0
| 0
| 0
| 0
| 0.073034
| 1
| 0.039326
| false
| 0
| 0.016854
| 0
| 0.070225
| 0.002809
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a725658d2ae09f32f9d44ef6a52a9c783b8c5ba
| 11,107
|
py
|
Python
|
main_scripts/main_qm9_experiment.py
|
JiaHe-yogurt/GNN
|
6b6dbc362591b4521e0b437d17ab09c1c879aa75
|
[
"Apache-2.0"
] | null | null | null |
main_scripts/main_qm9_experiment.py
|
JiaHe-yogurt/GNN
|
6b6dbc362591b4521e0b437d17ab09c1c879aa75
|
[
"Apache-2.0"
] | null | null | null |
main_scripts/main_qm9_experiment.py
|
JiaHe-yogurt/GNN
|
6b6dbc362591b4521e0b437d17ab09c1c879aa75
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import copy
"""
How To:
Example for running from command line:
python <path_to>/ProvablyPowerfulGraphNetworks/main_scripts/main_qm9_experiment.py --config=configs/qm9_config.json
"""
# Change working directory to project's main directory, and add it to path - for library and config usages
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(project_dir)
os.chdir(project_dir)
import importlib
from data_loader.data_generator import DataGenerator, QM9_DataGenerator, QM9_DataGenerator_gnn3
from models.invariant_basic import invariant_basic, QM9_invariant_basic,QM9_invariant_basic2, QM9_invariant_basic_gnn3
from trainers.trainer import Trainer, QM9_Trainer, QM9_Trainer_gnn3
import trainers.trainer as trainers
importlib.reload(trainers)
from Utils.config import process_config
from Utils.dirs import create_dirs
from Utils import doc_utils
from Utils.utils import get_args
import tensorflow.compat.v1 as tf
import pandas as pd
tf.disable_eager_execution()
import random
def parametersearch():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
data = QM9_DataGenerator(config)
#train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels),
data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
# for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
param_grid = {
'learning_rate': list(np.logspace(np.log10(0.00005), np.log10(0.1), base=10, num=1000)),
'architecture1d': list(range(5, 500, 10)),
'architecture2d': list(range(5, 500, 10)),
'architecture3d': list(range(5, 500, 10)),
}
LR, A1, A2, A3 = [], [], [], []
for expe in range(5) :
hyperparameters = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
lr, a1, a2, a3 = hyperparameters['learning_rate'], hyperparameters['architecture1d'], hyperparameters['architecture2d'], hyperparameters['architecture3d']
LR.append(lr), A1.append(a1), A2.append(a2), A3.append(a3)
config.exp_name = base_exp_name + "lr={0}_a1={1}_a2={2}=_a3={3}".format(lr, a1, a2, a3)
curr_dir = os.path.join(base_summary_folder,
"lr={0}_a1={1}_a2={2}=_a3={3}".format(lr, a1, a2, a3))
config.summary_dir = curr_dir
# create your data generator
data.config.learning_rate = lr
data.config.architecture1d = [a1]
data.config.architecture2d = [a2]
data.config.architecture3d = [a3]
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
sess.close()
tf.reset_default_graph()
import pandas as pd
def summary_10fold_results(summary_dir):
df = pd.read_csv(summary_dir+"/per_epoch_stats.csv")
acc = np.array(df["val_accuracy"])
for i in range(len(acc)):
acc[i] = float(''.join(list(acc[i])[1:-1]))
print("Results")
print("Mean MAR = {0}".format(np.mean(acc)))
# print("Mean std = {0}".format(np.std(acc)))
#for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
for lr in [0.00008*(2**i) for i in range(2,8)]:
dir = base_exp_name + "lr={0}".format(lr)
print('lr:' + str(lr))
summary_10fold_results(dir)
def main():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/example.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
print("lr = {0}".format(config.learning_rate))
print("decay = {0}".format(config.decay_rate))
if config.target_param is not False: # (0 == False) while (0 is not False)
print("target parameter: {0}".format(config.target_param))
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
data = QM9_DataGenerator(config)
train_lables_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels),
data.train_labels, data.val_labels, data.test_labels = train_lables_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer(sess, model, data, config)
# here you train your model
trainer.train()
# test model, restore best model
test_dists, test_loss, pred= trainer.test(load_best_model=True)
sess.close()
tf.reset_default_graph()
doc_utils.summary_qm9_results(config.summary_dir, test_dists, test_loss, trainer.best_epoch)
############## gnn 3 ###############
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/example.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
print("lr = {0}".format(config.learning_rate))
print("decay = {0}".format(config.decay_rate))
if config.target_param is not False: # (0 == False) while (0 is not False)
print("target parameter: {0}".format(config.target_param))
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
data = QM9_DataGenerator_gnn3(config)
train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels),
data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
# test model, restore best model
test_dists, test_loss, pred= trainer.test(load_best_model=True)
sess.close()
tf.reset_default_graph()
def parametersearch_gnn3():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
data = QM9_DataGenerator_gnn3(config)
#train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels),
data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
# for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
param_grid = {
'learning_rate': list(np.logspace(np.log10(0.00005), np.log10(0.1), base=10, num=1000)),
'architecture1d': [100],
'architecture2d': [100],
'architecture3d': [100],
}
for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
config.exp_name = base_exp_name + "lr={0}".format(lr)
curr_dir = os.path.join(base_summary_folder,
"lr={0}".format(lr))
config.summary_dir = curr_dir
# create your data generator
data.config.learning_rate = lr
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
sess.close()
tf.reset_default_graph()
for lr in [0.00008*(2**i) for i in range(2,8)]:
dir = base_exp_name + "lr={0}".format(lr)
print('lr:' + str(lr))
summary_10fold_results(dir)
| 49.364444
| 170
| 0.664176
| 1,496
| 11,107
| 4.716578
| 0.141043
| 0.028061
| 0.038549
| 0.034014
| 0.775652
| 0.767715
| 0.767715
| 0.766582
| 0.759354
| 0.759354
| 0
| 0.029736
| 0.224903
| 11,107
| 224
| 171
| 49.584821
| 0.789871
| 0
| 0
| 0.647799
| 0
| 0
| 0.08451
| 0.035468
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.132075
| null | null | 0.062893
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a94c09e124da89a904f6996b6fbee33bd7c8639
| 193
|
py
|
Python
|
pycparserext/c_generator.py
|
t11230/pycparserext
|
667781d692eb69ad4906eb107e168d4384bf006d
|
[
"MIT"
] | 51
|
2015-01-30T20:57:55.000Z
|
2022-03-31T05:32:36.000Z
|
pycparserext/c_generator.py
|
t11230/pycparserext
|
667781d692eb69ad4906eb107e168d4384bf006d
|
[
"MIT"
] | 54
|
2015-01-17T14:11:18.000Z
|
2022-03-05T15:09:40.000Z
|
pycparserext/c_generator.py
|
t11230/pycparserext
|
667781d692eb69ad4906eb107e168d4384bf006d
|
[
"MIT"
] | 19
|
2015-05-16T09:32:16.000Z
|
2022-03-31T05:32:37.000Z
|
from warnings import warn
warn("pycparserext.c_generator is deprecated. Please use pycparser.c_generator "
"directly.", DeprecationWarning)
from pycparser.c_generator import * # noqa
| 32.166667
| 80
| 0.777202
| 23
| 193
| 6.391304
| 0.652174
| 0.204082
| 0.258503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150259
| 193
| 5
| 81
| 38.6
| 0.896341
| 0.020725
| 0
| 0
| 0
| 0
| 0.438503
| 0.240642
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
43bac08e0072e30e7d3385666f0a4ddc9d0bf360
| 106
|
py
|
Python
|
Projeto Django/contato/views.py
|
yurimses/tp-cruzi-db
|
201c114b70c7dc3b7eaf549d1778689567499e22
|
[
"MIT"
] | 1
|
2021-03-26T18:21:59.000Z
|
2021-03-26T18:21:59.000Z
|
Projeto Django/contato/views.py
|
yurimses/tp-cruzi-db
|
201c114b70c7dc3b7eaf549d1778689567499e22
|
[
"MIT"
] | 14
|
2021-03-26T20:54:22.000Z
|
2021-04-06T17:18:53.000Z
|
Projeto Django/contato/views.py
|
yurimses/tp-cruzi-db
|
201c114b70c7dc3b7eaf549d1778689567499e22
|
[
"MIT"
] | 2
|
2021-04-01T23:43:20.000Z
|
2021-04-27T13:35:28.000Z
|
from django.shortcuts import render
def index(request):
return render(request, 'contato/index.html')
| 21.2
| 48
| 0.764151
| 14
| 106
| 5.785714
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 4
| 49
| 26.5
| 0.880435
| 0
| 0
| 0
| 0
| 0
| 0.169811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
43bcb1f9ac98959200ff683426846a0d3049c978
| 997
|
py
|
Python
|
Antiplagiat/Antiplagiat/bin/Debug/25335.py
|
DmitryTheFirst/AntiplagiatVkCup
|
556d3fe2e5a630d06a7aa49f2af5dcb28667275a
|
[
"Apache-2.0"
] | 1
|
2015-07-04T14:45:32.000Z
|
2015-07-04T14:45:32.000Z
|
Antiplagiat/Antiplagiat/bin/Debug/25335.py
|
DmitryTheFirst/AntiplagiatVkCup
|
556d3fe2e5a630d06a7aa49f2af5dcb28667275a
|
[
"Apache-2.0"
] | null | null | null |
Antiplagiat/Antiplagiat/bin/Debug/25335.py
|
DmitryTheFirst/AntiplagiatVkCup
|
556d3fe2e5a630d06a7aa49f2af5dcb28667275a
|
[
"Apache-2.0"
] | null | null | null |
s=raw_input()
n=len(s)
f=[0]*n
g=[0]*n
'''
AB->BA
No Other AB
'''
# 0=A
f[0]=1
for i in xrange(0,n-1):
if (s[i]=='A') and (s[i+1]=='A'):
f[i+1]+=g[i]
f[i+1]+=f[i]
if (s[i]=='A') and (s[i+1]=='B'):
f[i+1]+=f[i]
f[i+1]+=g[i]
g[i+1]+=g[i]
if (s[i]=='B') and (s[i+1]=='A'):
g[i+1]+=f[i]
f[i+1]+=g[i]
if (s[i]=='B') and (s[i+1]=='B'):
f[i+1]+=g[i]
g[i+1]+=g[i]
ans=0
if s[n-1]=='B':
ans+=g[n-1]
else:
ans=f[n-1]+g[n-1]
f=[0]*n
g=[0]*n
# 0=B
g[0]=1
for i in xrange(0,n-1):
if (s[i]=='A') and (s[i+1]=='A'):
f[i+1]+=g[i]
f[i+1]+=f[i]
if (s[i]=='A') and (s[i+1]=='B'):
f[i+1]+=f[i]
f[i+1]+=g[i]
g[i+1]+=g[i]
if (s[i]=='B') and (s[i+1]=='A'):
g[i+1]+=f[i]
f[i+1]+=g[i]
if (s[i]=='B') and (s[i+1]=='B'):
f[i+1]+=g[i]
g[i+1]+=g[i]
if (s[0]=='A') and (s[n-1]=='B'):
ans+=f[n-1]
elif (s[0]=='B'):
ans+=g[n-1]
print ans
| 19.173077
| 37
| 0.335005
| 251
| 997
| 1.326693
| 0.099602
| 0.156156
| 0.108108
| 0.144144
| 0.777778
| 0.705706
| 0.66967
| 0.66967
| 0.66967
| 0.66967
| 0
| 0.070028
| 0.283852
| 997
| 52
| 38
| 19.173077
| 0.396359
| 0.007021
| 0
| 0.73913
| 0
| 0
| 0.02079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.021739
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43cbda7be2fb312c8b8f5ed72a4d0c5fba998db0
| 41
|
py
|
Python
|
my_package/analysis/__init__.py
|
priydarshiroopak/Python_DS_20CS30042
|
e45a539610bb3aa990e80bbc4dfb318b7ed7b4d8
|
[
"MIT"
] | null | null | null |
my_package/analysis/__init__.py
|
priydarshiroopak/Python_DS_20CS30042
|
e45a539610bb3aa990e80bbc4dfb318b7ed7b4d8
|
[
"MIT"
] | null | null | null |
my_package/analysis/__init__.py
|
priydarshiroopak/Python_DS_20CS30042
|
e45a539610bb3aa990e80bbc4dfb318b7ed7b4d8
|
[
"MIT"
] | null | null | null |
from .visualize import plot_visualization
| 41
| 41
| 0.902439
| 5
| 41
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
43e1c90ac8714572338cd1b33c9b1a37ec31a303
| 22,750
|
py
|
Python
|
objectModel/Python/tests/cdm/projection/test_projection_add_type.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 884
|
2019-05-10T02:09:10.000Z
|
2022-03-31T14:02:00.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_add_type.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 171
|
2019-06-10T11:34:37.000Z
|
2022-03-31T22:50:12.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_add_type.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 340
|
2019-05-07T18:00:16.000Z
|
2022-03-31T12:00:15.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.enums import CdmObjectType
from cdm.storage import LocalAdapter
from cdm.utilities import ResolveOptions, AttributeResolutionDirectiveSet
from tests.common import async_test, TestHelper
from tests.utilities.projection_test_utils import ProjectionTestUtils
class ProjectionAddTypeTest(unittest.TestCase):
"""A test class for testing the AddTypeAttribute operation in a projection as well as SelectedTypeAttribute in a resolution guidance"""
# All possible combinations of the different resolution directives
res_opts_combinations = [
[],
['referenceOnly'],
['normalized'],
['structured'],
['referenceOnly', 'normalized'],
['referenceOnly', 'structured'],
['normalized', 'structured'],
['referenceOnly', 'normalized', 'structured']
]
# The path between TestDataPath and TestName.
tests_subpath = os.path.join('Cdm', 'Projection', 'ProjectionAddTypeTest')
@async_test
async def test_entity_attribute_proj_using_object_model(self):
"""Test for creating a projection with an AddTypeAttribute operation on an entity attribute using the object model"""
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, 'test_entity_attribute_proj_using_object_model')
corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_entity_attribute_proj_using_object_model')))
local_root = corpus.storage.fetch_root_folder('local')
# Create an entity
entity = ProjectionTestUtils.create_entity(corpus, local_root)
# Create a projection
projection = ProjectionTestUtils.create_projection(corpus, local_root)
# Create an AddTypeAttribute operation
add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
projection.operations.append(add_type_attr_op)
# Create an entity reference to hold this projection
projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
projection_entity_ref.explicit_reference = projection
# Create an entity attribute that contains this projection and add this to the entity
entity_attribute = corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF, 'TestEntityAttribute')
entity_attribute.entity = projection_entity_ref
entity.attributes.append(entity_attribute)
# Resolve the entity
resolved_entity = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), None, local_root)
# Verify correctness of the resolved attributes after running the AddTypeAttribute operation
# Original set of attributes: ["id", "name", "value", "date"]
# Type attribute: "testType"
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('id', resolved_entity.attributes[0].name)
self.assertEqual('name', resolved_entity.attributes[1].name)
self.assertEqual('value', resolved_entity.attributes[2].name)
self.assertEqual('date', resolved_entity.attributes[3].name)
self.assertEqual('testType', resolved_entity.attributes[4].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[4].applied_traits[4].named_reference)
@async_test
async def test_entity_proj_using_object_model(self):
"""Test for creating a projection with an AddTypeAttribute operation on an entity definition using the object model"""
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, 'test_entity_proj_using_object_model')
corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_entity_proj_using_object_model')))
local_root = corpus.storage.fetch_root_folder('local')
# Create an entity
entity = ProjectionTestUtils.create_entity(corpus, local_root)
# Create a projection
projection = ProjectionTestUtils.create_projection(corpus, local_root)
# Create an AddTypeAttribute operation
add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
projection.operations.append(add_type_attr_op)
# Create an entity reference to hold this projection
projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
projection_entity_ref.explicit_reference = projection
# Set the entity's ExtendEntity to be the projection
entity.extends_entity = projection_entity_ref
# Resolve the entity
resolved_entity = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), None, local_root)
# Verify correctness of the resolved attributes after running the AddTypeAttribute operation
# Original set of attributes: ["id", "name", "value", "date"]
# Type attribute: "testType"
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('id', resolved_entity.attributes[0].name)
self.assertEqual('name', resolved_entity.attributes[1].name)
self.assertEqual('value', resolved_entity.attributes[2].name)
self.assertEqual('date', resolved_entity.attributes[3].name)
self.assertEqual('testType', resolved_entity.attributes[4].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[4].applied_traits[4].named_reference)
@async_test
async def test_conditional_proj_using_object_model(self):
"""Test for creating a projection with an AddTypeAttribute operation and a condition using the object model"""
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, 'test_conditional_proj_using_object_model')
corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_conditional_proj_using_object_model')))
local_root = corpus.storage.fetch_root_folder('local')
# Create an entity
entity = ProjectionTestUtils.create_entity(corpus, local_root)
# Create a projection with a condition that states the operation should only execute when the resolution directive is 'referenceOnly'
projection = ProjectionTestUtils.create_projection(corpus, local_root)
projection.condition = 'referenceOnly==True'
# Create an AddTypeAttribute operation
add_type_attr_op = corpus.make_object(CdmObjectType.OPERATION_ADD_TYPE_ATTRIBUTE_DEF)
add_type_attr_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testType')
add_type_attr_op.type_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'entityName', True)
projection.operations.append(add_type_attr_op)
# Create an entity reference to hold this projection
projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
projection_entity_ref.explicit_reference = projection
# Create an entity attribute that contains this projection and add this to the entity
entity_attribute = corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF, 'TestEntityAttribute')
entity_attribute.entity = projection_entity_ref
entity.attributes.append(entity_attribute)
# Create resolution options with the 'referenceOnly' directive
res_opt = ResolveOptions(entity.in_document)
res_opt.directives = AttributeResolutionDirectiveSet(set(['referenceOnly']))
# Resolve the entity with 'referenceOnly'
resolved_entity_with_reference_only = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), res_opt, local_root)
# Verify correctness of the resolved attributes after running the AddTypeAttribute operation
# Original set of attributes: ["id", "name", "value", "date"]
# Type attribute: "testType"
self.assertEqual(5, len(resolved_entity_with_reference_only.attributes))
self.assertEqual('id', resolved_entity_with_reference_only.attributes[0].name)
self.assertEqual('name', resolved_entity_with_reference_only.attributes[1].name)
self.assertEqual('value', resolved_entity_with_reference_only.attributes[2].name)
self.assertEqual('date', resolved_entity_with_reference_only.attributes[3].name)
self.assertEqual('testType', resolved_entity_with_reference_only.attributes[4].name)
self.assertIsNotNone(resolved_entity_with_reference_only.attributes[4].applied_traits.item('is.linkedEntity.name'))
# Now resolve the entity with the 'structured' directive
res_opt.directives = AttributeResolutionDirectiveSet(set(['structured']))
resolved_entity_with_structured = await entity.create_resolved_entity_async('Resolved_{}.cdm.json'.format(entity.entity_name), res_opt, local_root)
# Verify correctness of the resolved attributes after running the AddTypeAttribute operation
# Original set of attributes: ["id", "name", "value", "date"]
# No Type attribute added, condition was false
self.assertEqual(4, len(resolved_entity_with_structured.attributes))
self.assertEqual('id', resolved_entity_with_structured.attributes[0].name)
self.assertEqual('name', resolved_entity_with_structured.attributes[1].name)
self.assertEqual('value', resolved_entity_with_structured.attributes[2].name)
self.assertEqual('date', resolved_entity_with_structured.attributes[3].name)
@async_test
async def test_add_type_attribute_proj(self):
"""AddTypeAttribute on an entity attribute"""
test_name = 'test_add_type_attribute_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Type attribute: "someType"
self.assertEqual(8, len(resolved_entity.attributes))
self.assertEqual('emailId', resolved_entity.attributes[0].name)
self.assertEqual('address', resolved_entity.attributes[1].name)
self.assertEqual('isPrimary', resolved_entity.attributes[2].name)
self.assertEqual('phoneId', resolved_entity.attributes[3].name)
self.assertEqual('number', resolved_entity.attributes[4].name)
self.assertEqual('socialId', resolved_entity.attributes[5].name)
self.assertEqual('account', resolved_entity.attributes[6].name)
self.assertEqual('someType', resolved_entity.attributes[7].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[7].applied_traits[4].named_reference)
@async_test
async def test_selected_type_attr(self):
"""SelectedTypeAttribute on an entity attribute"""
test_name = 'test_selected_type_attr'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Type attribute: "someType"
self.assertEqual(8, len(resolved_entity.attributes))
self.assertEqual('emailId', resolved_entity.attributes[0].name)
self.assertEqual('address', resolved_entity.attributes[1].name)
self.assertEqual('isPrimary', resolved_entity.attributes[2].name)
self.assertEqual('phoneId', resolved_entity.attributes[3].name)
self.assertEqual('number', resolved_entity.attributes[4].name)
self.assertEqual('socialId', resolved_entity.attributes[5].name)
self.assertEqual('account', resolved_entity.attributes[6].name)
self.assertEqual('someType', resolved_entity.attributes[7].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[7].applied_traits[4].named_reference)
@async_test
async def test_extends_entity_proj(self):
"""AddTypeAttribute on an entity definition"""
test_name = 'test_extends_entity_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Type attribute: "someType"
self.assertEqual(8, len(resolved_entity.attributes))
self.assertEqual('emailId', resolved_entity.attributes[0].name)
self.assertEqual('address', resolved_entity.attributes[1].name)
self.assertEqual('isPrimary', resolved_entity.attributes[2].name)
self.assertEqual('phoneId', resolved_entity.attributes[3].name)
self.assertEqual('number', resolved_entity.attributes[4].name)
self.assertEqual('socialId', resolved_entity.attributes[5].name)
self.assertEqual('account', resolved_entity.attributes[6].name)
self.assertEqual('someType', resolved_entity.attributes[7].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[7].applied_traits[4].named_reference)
@async_test
async def test_extends_entity(self):
"""SelectedTypeAttribute on an entity definition"""
test_name = 'test_extends_entity'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Type attribute: "someType" (using extendsEntityResolutionGuidance)
self.assertEqual(8, len(resolved_entity.attributes))
self.assertEqual('emailId', resolved_entity.attributes[0].name)
self.assertEqual('address', resolved_entity.attributes[1].name)
self.assertEqual('isPrimary', resolved_entity.attributes[2].name)
self.assertEqual('phoneId', resolved_entity.attributes[3].name)
self.assertEqual('number', resolved_entity.attributes[4].name)
self.assertEqual('socialId', resolved_entity.attributes[5].name)
self.assertEqual('account', resolved_entity.attributes[6].name)
self.assertEqual('someType', resolved_entity.attributes[7].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[7].applied_traits[4].named_reference)
@async_test
async def test_add_type_with_combine_proj(self):
"""AddTypeAttribute on an entity attribute (after a CombineAttributes)"""
test_name = 'test_add_type_with_combine_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Merge ["emailId, "phoneId, "socialId"] into "contactId", type attribute: "contactType"
self.assertEqual(6, len(resolved_entity.attributes))
self.assertEqual('address', resolved_entity.attributes[0].name)
self.assertEqual('isPrimary', resolved_entity.attributes[1].name)
self.assertEqual('number', resolved_entity.attributes[2].name)
self.assertEqual('account', resolved_entity.attributes[3].name)
self.assertEqual('contactId', resolved_entity.attributes[4].name)
self.assertEqual('contactType', resolved_entity.attributes[5].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[5].applied_traits[4].named_reference)
@async_test
async def test_combine_ops_proj(self):
"""AddTypeAttribute with other operations in the same projection"""
test_name = 'test_combine_ops_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Type attribute: "someType", rename "address" to "homeAddress"
self.assertEqual(9, len(resolved_entity.attributes))
self.assertEqual('emailId', resolved_entity.attributes[0].name)
self.assertEqual('address', resolved_entity.attributes[1].name)
self.assertEqual('isPrimary', resolved_entity.attributes[2].name)
self.assertEqual('phoneId', resolved_entity.attributes[3].name)
self.assertEqual('number', resolved_entity.attributes[4].name)
self.assertEqual('socialId', resolved_entity.attributes[5].name)
self.assertEqual('account', resolved_entity.attributes[6].name)
self.assertEqual('someType', resolved_entity.attributes[7].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[7].applied_traits[4].named_reference)
self.assertEqual('homeAddress', resolved_entity.attributes[8].name)
@async_test
async def test_combine_ops_nested_proj(self):
"""Nested projections with AddTypeAttribute and other operations"""
test_name = 'test_combine_ops_nested_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Merge ["emailId, "phoneId, "socialId"] into "contactId", type attribute: "contactType",
# rename ["contactId", "isPrimary"] as "new_{m}", include ["contactId", "new_isPrimary", "contactType"]
self.assertEqual(3, len(resolved_entity.attributes))
self.assertEqual('new_contactId', resolved_entity.attributes[0].name)
self.assertEqual('new_isPrimary', resolved_entity.attributes[1].name)
self.assertEqual('contactType', resolved_entity.attributes[2].name)
self.assertEqual('is.linkedEntity.name', resolved_entity.attributes[2].applied_traits[4].named_reference)
@async_test
async def test_conditional_proj(self):
"""AddTypeAttribute with a condition"""
test_name = 'test_conditional_proj'
entity_name = 'Customer'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name))
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [])
# Original set of attributes: ["emailId", "address", "isPrimary", "phoneId", "number", "socialId", "account"]
# Merge ["emailId, "phoneId, "socialId"] into "contactId", type attribute: "contactType"
# Condition for projection containing AddTypeAttribute is false, so no Type attribute is created
self.assertEqual(5, len(resolved_entity.attributes))
self.assertEqual('address', resolved_entity.attributes[0].name)
self.assertEqual('isPrimary', resolved_entity.attributes[1].name)
self.assertEqual('number', resolved_entity.attributes[2].name)
self.assertEqual('account', resolved_entity.attributes[3].name)
self.assertEqual('contactId', resolved_entity.attributes[4].name)
| 60.828877
| 162
| 0.736396
| 2,636
| 22,750
| 6.098634
| 0.075493
| 0.104504
| 0.125404
| 0.017417
| 0.877706
| 0.85581
| 0.843431
| 0.807664
| 0.801132
| 0.766049
| 0
| 0.0055
| 0.160835
| 22,750
| 373
| 163
| 60.991957
| 0.836572
| 0.156176
| 0
| 0.697095
| 0
| 0
| 0.097576
| 0.032961
| 0
| 0
| 0
| 0
| 0.39834
| 1
| 0
| false
| 0
| 0.029046
| 0
| 0.041494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78fe3932c1c93c8d105d7b2ed775d4eb25005e95
| 164
|
py
|
Python
|
tests/examples/unit_test.py
|
specipy/specipy
|
56daa95a57fa7425b7943c5a1a4d4978e75417ec
|
[
"Apache-2.0"
] | null | null | null |
tests/examples/unit_test.py
|
specipy/specipy
|
56daa95a57fa7425b7943c5a1a4d4978e75417ec
|
[
"Apache-2.0"
] | null | null | null |
tests/examples/unit_test.py
|
specipy/specipy
|
56daa95a57fa7425b7943c5a1a4d4978e75417ec
|
[
"Apache-2.0"
] | null | null | null |
# unit_test.py
from _specipy.framework import *
@test
def a_passing_test():
assert_true(1 + 2 == 3)
@test
def z_failing_test():
assert_true(1 + 2 == 5)
| 12.615385
| 32
| 0.658537
| 27
| 164
| 3.703704
| 0.666667
| 0.14
| 0.28
| 0.3
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046154
| 0.207317
| 164
| 12
| 33
| 13.666667
| 0.723077
| 0.073171
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| true
| 0.142857
| 0.142857
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
603dce064624c363e37081814a5438ca29a7eb39
| 23,644
|
py
|
Python
|
budgetportal/tests/test_prov_infra_chart.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
budgetportal/tests/test_prov_infra_chart.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
budgetportal/tests/test_prov_infra_chart.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
import json
from budgetportal.json_encoder import JSONEncoder
from budgetportal.models import (
FinancialYear,
IRMSnapshot,
ProvInfraProject,
ProvInfraProjectSnapshot,
Quarter,
)
from budgetportal.prov_infra_project.charts import time_series_data
from django.test import TestCase
class DateQuarterMatchTestCase(TestCase):
def setUp(self):
self.project = ProvInfraProject(IRM_project_id=1)
self.fin_year = FinancialYear(slug="2019-20")
q4 = Quarter(number=4)
irm_snapshot = IRMSnapshot(financial_year=self.fin_year, quarter=q4)
self.project_snapshot = ProvInfraProjectSnapshot(
irm_snapshot=irm_snapshot,
project=self.project,
estimated_construction_start_date="2019-01-01",
estimated_construction_end_date="2021-12-31",
)
def test_dates_are_end_of_quarters(self):
"""Test that all dates are end day of a quarter"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 4)
# Q1->06-30, Q2->09-30, Q3->12-31, Q4->03-31
self.assertEqual(snapshots_data[0]["date"], "2019-06-30")
self.assertEqual(snapshots_data[1]["date"], "2019-09-30")
self.assertEqual(snapshots_data[2]["date"], "2019-12-31")
self.assertEqual(snapshots_data[3]["date"], "2020-03-31")
def test_dates_match_with_quarters(self):
"""Test that dates and quarter_labels match"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 4)
# Q1->06-30, Q2->09-30, Q3->12-31, Q4->03-31
self.assertEqual(snapshots_data[0]["date"], "2019-06-30")
self.assertEqual(snapshots_data[0]["quarter_label"], "END Q1")
self.assertEqual(snapshots_data[1]["date"], "2019-09-30")
self.assertEqual(snapshots_data[1]["quarter_label"], "END Q2")
self.assertEqual(snapshots_data[2]["date"], "2019-12-31")
self.assertEqual(snapshots_data[2]["quarter_label"], "END Q3")
self.assertEqual(snapshots_data[3]["date"], "2020-03-31")
self.assertEqual(snapshots_data[3]["quarter_label"], "END Q4")
class TotalEstimatedProjectCostTestCase(TestCase):
def setUp(self):
self.project = ProvInfraProject(IRM_project_id=1)
self.fin_year = FinancialYear(slug="2030-31")
q2 = Quarter(number=2)
irm_snapshot = IRMSnapshot(financial_year=self.fin_year, quarter=q2)
self.project_snapshot = ProvInfraProjectSnapshot(
irm_snapshot=irm_snapshot, project=self.project, total_project_cost=100
)
def test_total_project_cost_is_null(self):
"""Test that total project cost for Q1 (which created by Q2 snapshot) is Null"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 2)
# Check Q1 values
self.assertEqual(snapshots_data[0]["total_estimated_project_cost"], None)
def test_total_project_cost_assigned_correctly(self):
"""Test that total project cost for Q2 is 100"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 2)
# Check Q2 values
self.assertEqual(snapshots_data[1]["total_estimated_project_cost"], 100)
class StatusTestCase(TestCase):
def setUp(self):
self.project = ProvInfraProject(IRM_project_id=1)
self.fin_year = FinancialYear(slug="2030-31")
q2 = Quarter(number=2)
irm_snapshot = IRMSnapshot(financial_year=self.fin_year, quarter=q2)
self.project_snapshot = ProvInfraProjectSnapshot(
irm_snapshot=irm_snapshot, project=self.project, status="Tender"
)
def test_status_is_null(self):
"""Test that status for Q1 (which created by Q2 snapshot) is Null"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 2)
# Check Q1 values
self.assertEqual(snapshots_data[0]["status"], None)
def test_status_assigned_correctly(self):
"""Test that status for Q2 is Tender"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 2)
# Check Q2 values
self.assertEqual(snapshots_data[1]["status"], "Tender")
class Q1UpdateTestCase(TestCase):
def setUp(self):
self.project = ProvInfraProject(IRM_project_id=1)
self.fin_year = FinancialYear(slug="2030-31")
q1 = Quarter(number=1)
irm_snapshot_1 = IRMSnapshot(financial_year=self.fin_year, quarter=q1)
self.project_snapshot = ProvInfraProjectSnapshot(
irm_snapshot=irm_snapshot_1,
project=self.project,
actual_expenditure_q1=10,
expenditure_from_previous_years_total=200,
)
def test_q1_updated_after_q2_snapshot_inserted(self):
"""Test that Q1 values are updated correctly when Q2 snapshot is added"""
snapshots_data = time_series_data([self.project_snapshot])
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 1)
# Check Q1 values
self.assertEqual(snapshots_data[0]["total_spent_in_quarter"], 10)
self.assertEqual(snapshots_data[0]["total_spent_to_date"], 210)
# Create Q2 snapshot
q2 = Quarter(number=2)
irm_snapshot_2 = IRMSnapshot(financial_year=self.fin_year, quarter=q2)
self.project_snapshot_2 = ProvInfraProjectSnapshot(
irm_snapshot=irm_snapshot_2,
project=self.project,
actual_expenditure_q1=11,
actual_expenditure_q2=20,
expenditure_from_previous_years_total=200,
)
# Recreate the chart data
snapshots_data = time_series_data(
[self.project_snapshot, self.project_snapshot_2]
)
snapshots_data = snapshots_data[u"snapshots"]
self.assertEqual(len(snapshots_data), 2)
# Check Q1 values
self.assertEqual(snapshots_data[0]["total_spent_in_quarter"], 11)
self.assertEqual(snapshots_data[0]["total_spent_to_date"], 211)
class Q1Q2UpdateTestCase(TestCase):
    """Q1 and Q2 figures must be refreshed when a Q3 snapshot arrives."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        snapshot_q1 = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=1)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=snapshot_q1,
            project=self.project,
            actual_expenditure_q1=10,
            expenditure_from_previous_years_total=200,
        )
        snapshot_q2 = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot_2 = ProvInfraProjectSnapshot(
            irm_snapshot=snapshot_q2,
            project=self.project,
            actual_expenditure_q1=11,
            actual_expenditure_q2=20,
            expenditure_from_previous_years_total=200,
        )

    def test_q1_q2_updated_after_q3_snapshot_inserted(self):
        """Adding a Q3 snapshot revises the Q1 and Q2 chart items."""
        rows = time_series_data(
            [self.project_snapshot, self.project_snapshot_2]
        )["snapshots"]
        self.assertEqual(len(rows), 2)
        # Q1 values come from the latest (Q2) snapshot.
        self.assertEqual(rows[0]["total_spent_in_quarter"], 11)
        self.assertEqual(rows[0]["total_spent_to_date"], 211)
        # Q2 values.
        self.assertEqual(rows[1]["total_spent_in_quarter"], 20)
        self.assertEqual(rows[1]["total_spent_to_date"], 231)
        # Add a Q3 snapshot that revises both earlier quarters.
        snapshot_q3 = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=3)
        )
        self.project_snapshot_3 = ProvInfraProjectSnapshot(
            irm_snapshot=snapshot_q3,
            project=self.project,
            actual_expenditure_q1=12,
            actual_expenditure_q2=21,
            actual_expenditure_q3=30,
            expenditure_from_previous_years_total=200,
        )
        # Rebuild the chart data with all three snapshots present.
        rows = time_series_data(
            [self.project_snapshot, self.project_snapshot_2, self.project_snapshot_3]
        )["snapshots"]
        self.assertEqual(len(rows), 3)
        # Q1 and Q2 now reflect the Q3 snapshot's revised figures.
        self.assertEqual(rows[0]["total_spent_in_quarter"], 12)
        self.assertEqual(rows[0]["total_spent_to_date"], 212)
        self.assertEqual(rows[1]["total_spent_in_quarter"], 21)
        self.assertEqual(rows[1]["total_spent_to_date"], 233)
class NullQ2SubsequentNullSpendTestCase(TestCase):
    """A None quarterly expenditure makes that quarter's and all later
    running totals None."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=3)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot,
            project=self.project,
            actual_expenditure_q1=10,
            actual_expenditure_q2=None,
            actual_expenditure_q3=30,
            expenditure_from_previous_years_total=200,
        )

    def test_total_spends_are_correct(self):
        """Q2 and Q3 running totals are None because actual_expenditure_q2
        is None; the Q1 total is still computable."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 3)
        # total_spent_to_date for Q1, Q2 and Q3 respectively.
        self.assertEqual(rows[0]["total_spent_to_date"], 210)
        self.assertIsNone(rows[1]["total_spent_to_date"])
        self.assertIsNone(rows[2]["total_spent_to_date"])
class LatestValueTestCase(TestCase):
    """The newest snapshot's previous-years total feeds earlier chart items."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=1)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot,
            project=self.project,
            actual_expenditure_q1=10,
            expenditure_from_previous_years_total=100,
        )

    def test_correct_value_used_for_previous_total(self):
        """The Q2 snapshot's expenditure_from_previous_years_total updates
        total_spent of the Q1 chart item."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["total_spent_to_date"], 110)
        # Add a Q2 snapshot carrying a revised previous-years total.
        irm_snapshot_2 = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot_2 = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_2,
            project=self.project,
            actual_expenditure_q1=10,
            actual_expenditure_q2=20,
            expenditure_from_previous_years_total=200,
        )
        # Rebuild the chart data with both snapshots present.
        rows = time_series_data(
            [self.project_snapshot, self.project_snapshot_2]
        )["snapshots"]
        self.assertEqual(len(rows), 2)
        # Both quarters now start from the revised base of 200.
        self.assertEqual(rows[0]["total_spent_to_date"], 210)
        self.assertEqual(rows[1]["total_spent_to_date"], 230)
class NullExpenditureFromPreviousFinYearsTestCase(TestCase):
    """Running totals cannot be computed without a previous-years base."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot,
            project=self.project,
            actual_expenditure_q1=10,
            actual_expenditure_q2=20,
            expenditure_from_previous_years_total=None,
        )

    def test_total_spends_are_none(self):
        """Q1 and Q2 total_spent values are None when
        expenditure_from_previous_years_total is empty."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 2)
        # total_spent_to_date for Q1 and Q2 respectively.
        self.assertIsNone(rows[0]["total_spent_to_date"])
        self.assertIsNone(rows[1]["total_spent_to_date"])
class EmitMissingQuartersTestCase(TestCase):
    """Chart items are back-filled for quarters before the first snapshot."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot, project=self.project
        )

    def test_two_snapshots_emitted(self):
        """A first snapshot in Q2 yields items for Q1 and Q2, and nothing
        later than Q2."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 2)
        # The back-filled Q1 item.
        self.assertEqual(rows[0]["financial_year_label"], "2030-31")
        self.assertEqual(rows[0]["date"], "2030-06-30")
        self.assertEqual(rows[0]["quarter_label"], "END Q1")
        # The actual Q2 item.
        self.assertEqual(rows[1]["date"], "2030-09-30")
        self.assertEqual(rows[1]["quarter_label"], "END Q2")
class EmitMissingQuartersSecondTestCase(TestCase):
    """Missing quarters are back-filled independently per financial year."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year_1 = FinancialYear(slug="2018-19")
        irm_snapshot_1 = IRMSnapshot(
            financial_year=self.fin_year_1, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_1, project=self.project
        )
        self.fin_year_2 = FinancialYear(slug="2019-20")
        irm_snapshot_2 = IRMSnapshot(
            financial_year=self.fin_year_2, quarter=Quarter(number=4)
        )
        self.project_snapshot_2 = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_2, project=self.project
        )

    def test_six_snapshots_emitted(self):
        """2018 Q2 expands to 2 items and 2019 Q4 expands to 4 items."""
        rows = time_series_data(
            [self.project_snapshot, self.project_snapshot_2]
        )["snapshots"]
        self.assertEqual(len(rows), 6)
        # 2018-19: Q1 and Q2 in order.
        self.assertEqual(rows[0]["financial_year_label"], "2018-19")
        self.assertEqual(rows[0]["date"], "2018-06-30")
        self.assertEqual(rows[0]["quarter_label"], "END Q1")
        self.assertEqual(rows[1]["date"], "2018-09-30")
        self.assertEqual(rows[1]["quarter_label"], "END Q2")
        # 2019-20: Q1 through Q4 in order.
        self.assertEqual(rows[2]["financial_year_label"], "2019-20")
        self.assertEqual(rows[2]["date"], "2019-06-30")
        self.assertEqual(rows[2]["quarter_label"], "END Q1")
        self.assertEqual(rows[3]["date"], "2019-09-30")
        self.assertEqual(rows[3]["quarter_label"], "END Q2")
        self.assertEqual(rows[4]["date"], "2019-12-31")
        self.assertEqual(rows[4]["quarter_label"], "END Q3")
        self.assertEqual(rows[5]["date"], "2020-03-31")
        self.assertEqual(rows[5]["quarter_label"], "END Q4")
class ComputeTotalSpentIn2YearsTestCase(TestCase):
    """Each financial year's running total restarts from its own base."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year_1 = FinancialYear(slug="2018-19")
        irm_snapshot_1 = IRMSnapshot(
            financial_year=self.fin_year_1, quarter=Quarter(number=4)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_1,
            project=self.project,
            actual_expenditure_q1=1,
            actual_expenditure_q2=2,
            actual_expenditure_q3=3,
            actual_expenditure_q4=4,
            expenditure_from_previous_years_total=100,
        )
        self.fin_year_2 = FinancialYear(slug="2019-20")
        irm_snapshot_2 = IRMSnapshot(
            financial_year=self.fin_year_2, quarter=Quarter(number=1)
        )
        self.project_snapshot_2 = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_2,
            project=self.project,
            actual_expenditure_q1=50,
            expenditure_from_previous_years_total=200,
        )

    def test_total_spent_to_dates_are_correct(self):
        """The second year's total_spent_to_date starts from that year's own
        total_from_previous_financial_years, not the first year's total."""
        rows = time_series_data(
            [self.project_snapshot, self.project_snapshot_2]
        )["snapshots"]
        self.assertEqual(len(rows), 5)
        # 2019 Q1 must be 200 + 50, not the 2018 running total + 50.
        self.assertNotEqual(rows[4]["total_spent_to_date"], 110)
        self.assertEqual(rows[4]["total_spent_to_date"], 250)
class FinancialYearLabelTestCase(TestCase):
    """Only the first quarter of a year carries the financial-year label."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot, project=self.project
        )

    def test_label_is_assigned_to_q1(self):
        """The financial-year label is attached to the Q1 item."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0]["quarter_label"], "END Q1")
        self.assertEqual(rows[0]["financial_year_label"], "2030-31")

    def test_label_is_empty_for_q2(self):
        """Quarters other than Q1 get an empty financial-year label."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[1]["quarter_label"], "END Q2")
        self.assertEqual(rows[1]["financial_year_label"], "")
class QuarterLabelTestCase(TestCase):
    """Quarter labels read 'END Q<n>' for each emitted quarter."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=4)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot, project=self.project
        )

    def test_label_is_correct(self):
        """Labels start with 'END Q' and end with the quarter number (1-4)."""
        rows = time_series_data([self.project_snapshot])["snapshots"]
        self.assertEqual(len(rows), 4)
        # One label per quarter, in order.
        for index, label in enumerate(["END Q1", "END Q2", "END Q3", "END Q4"]):
            self.assertEqual(rows[index]["quarter_label"], label)
class EventsTestCase(TestCase):
    """Project milestone dates are emitted as chart events."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot,
            project=self.project,
            start_date="2029-09-30",
            estimated_construction_start_date="2030-01-01",
            estimated_completion_date="2033-02-01",
            contracted_construction_end_date="2033-01-31",
            estimated_construction_end_date="2032-12-31",
        )

    def test_events_assigned_correctly(self):
        """Every milestone date is copied into the events list in order."""
        events = time_series_data([self.project_snapshot])["events"]
        self.assertEqual(len(events), 5)
        expected_dates = [
            "2029-09-30",  # project start date
            "2030-01-01",  # estimated construction start date
            "2033-02-01",  # estimated completion date
            "2033-01-31",  # contracted construction end date
            "2032-12-31",  # estimated construction end date
        ]
        for event, expected in zip(events, expected_dates):
            self.assertEqual(event["date"], expected)

    def test_events_when_latest_snapshot_has_empty_dates(self):
        """Only non-null dates from the latest snapshot become events."""
        irm_snapshot_2 = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=3)
        )
        self.project_snapshot_2 = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot_2, project=self.project, start_date="2029-09-30"
        )
        events = time_series_data(
            [self.project_snapshot, self.project_snapshot_2]
        )["events"]
        self.assertEqual(len(events), 1)
        # Only the project start date survives.
        self.assertEqual(events[0]["date"], "2029-09-30")
class SerializeChartDataResultTestCase(TestCase):
    """Chart data must round-trip through JSON serialization."""

    def setUp(self):
        self.project = ProvInfraProject(IRM_project_id=1)
        self.fin_year = FinancialYear(slug="2030-31")
        irm_snapshot = IRMSnapshot(
            financial_year=self.fin_year, quarter=Quarter(number=2)
        )
        self.project_snapshot = ProvInfraProjectSnapshot(
            irm_snapshot=irm_snapshot, project=self.project, status="Tender"
        )

    def test_chart_data_can_be_serialized(self):
        """Serializing and then deserializing chart data is lossless."""
        original = time_series_data([self.project_snapshot])
        round_tripped = json.loads(json.dumps(original, cls=JSONEncoder))
        self.assertEqual(original, round_tripped)
| 43.067395
| 111
| 0.681272
| 2,891
| 23,644
| 5.288827
| 0.066067
| 0.123283
| 0.100458
| 0.117201
| 0.854022
| 0.818378
| 0.793918
| 0.760039
| 0.710268
| 0.68862
| 0
| 0.048144
| 0.217264
| 23,644
| 548
| 112
| 43.145985
| 0.77803
| 0.093766
| 0
| 0.621027
| 0
| 0
| 0.079547
| 0.008844
| 0
| 0
| 0
| 0
| 0.229829
| 1
| 0.085575
| false
| 0
| 0.012225
| 0
| 0.134474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6040462034a03c9897798ce8a7e74037487ca12c
| 3,225
|
py
|
Python
|
src/example7-arguments.py
|
efe3535/pythonexamples
|
676aeb5d730dc0fc364083fccbd8c241de5bdc0a
|
[
"CC0-1.0"
] | 1
|
2020-11-30T06:53:06.000Z
|
2020-11-30T06:53:06.000Z
|
src/example7-arguments.py
|
efeduino/pythonexamples
|
676aeb5d730dc0fc364083fccbd8c241de5bdc0a
|
[
"CC0-1.0"
] | 2
|
2021-05-16T06:34:36.000Z
|
2021-06-05T16:44:15.000Z
|
src/example7-arguments.py
|
efeduino/pythonexamples
|
676aeb5d730dc0fc364083fccbd8c241de5bdc0a
|
[
"CC0-1.0"
] | 1
|
2021-06-04T12:17:31.000Z
|
2021-06-04T12:17:31.000Z
|
"""Example: print 'Hello World' in a chosen colorama color from CLI arguments."""
import sys

from colorama import Fore

helpfile = """
======================
| COMMANDS |
======================
--help : Lists commands.
--color : Requires one argument, prints 'Hello World' in color selected.
--allcolors : prints 'Hello World' with all colors.
Colors:
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
"""

# Foreground color names understood by the --color command; each is also an
# attribute of colorama.Fore, so getattr(Fore, name) yields the escape code.
COLOR_NAMES = (
    "BLACK", "RED", "GREEN", "YELLOW", "BLUE", "MAGENTA", "CYAN", "WHITE",
)


def main():
    """Process sys.argv: --help, --allcolors, or --color <COLOR>.

    Fixes over the original: the flag marking "next token is a color" is now
    reset after the color token is consumed, so --color works more than once;
    the eight copy-pasted per-color branches are collapsed into a table
    lookup; and the repeated O(n) sys.argv.index() scans are gone.
    """
    if len(sys.argv) == 1:
        # No arguments at all: show the usage text.
        print(helpfile)
        return

    expect_color = False
    for argument in sys.argv[1:]:
        if expect_color:
            # This token is the operand of a preceding --color flag.
            expect_color = False
            if argument in COLOR_NAMES:
                print(getattr(Fore, argument) + "Hello World" + Fore.RESET)
            # Unknown color names are silently ignored, as before.
            continue
        if argument == "--help":
            print(helpfile)
        elif argument == "--allcolors":
            for name in COLOR_NAMES:
                print(getattr(Fore, name), "Hello World", Fore.RESET)
        elif argument == "--color":
            expect_color = True


if __name__ == "__main__":
    main()
| 40.822785
| 77
| 0.52
| 349
| 3,225
| 4.805158
| 0.12894
| 0.155039
| 0.200358
| 0.271914
| 0.782946
| 0.782946
| 0.782946
| 0.692904
| 0.692904
| 0.673226
| 0
| 0.004182
| 0.332713
| 3,225
| 78
| 78
| 41.346154
| 0.775093
| 0.008062
| 0
| 0.476923
| 0
| 0
| 0.197373
| 0.013763
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030769
| 0
| 0.030769
| 0.446154
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
60446f4edb1c084088444d8183d82507c690a285
| 8,143
|
py
|
Python
|
linux/keyman-config/tests/test_install_kmp.py
|
ermshiperete/keyman
|
0eeef1b5794fd698447584e531e2a6c1ef4c05aa
|
[
"MIT"
] | 1
|
2021-03-08T09:31:47.000Z
|
2021-03-08T09:31:47.000Z
|
linux/keyman-config/tests/test_install_kmp.py
|
ermshiperete/keyman
|
0eeef1b5794fd698447584e531e2a6c1ef4c05aa
|
[
"MIT"
] | null | null | null |
linux/keyman-config/tests/test_install_kmp.py
|
ermshiperete/keyman
|
0eeef1b5794fd698447584e531e2a6c1ef4c05aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import unittest
from unittest.mock import patch, ANY
from keyman_config.install_kmp import install_keyboards_to_ibus, install_keyboards_to_gnome
class InstallKmpTests(unittest.TestCase):
    """Tests for installing keyboards into ibus and into GNOME."""

    def setUp(self):
        self.mockInstallToIbus = self._activate_patch(
            'keyman_config.install_kmp.install_to_ibus')
        self.mockRestartIbus = self._activate_patch(
            'keyman_config.install_kmp.restart_ibus')
        self.mockGetIbusBus = self._activate_patch(
            'keyman_config.install_kmp.get_ibus_bus')
        self.mockGnomeKeyboardsUtilClass = self._activate_patch(
            'keyman_config.install_kmp.GnomeKeyboardsUtil')

    def _activate_patch(self, target):
        # Start a patcher for `target`, register its teardown, return the mock.
        patcher = patch(target)
        mock = patcher.start()
        self.addCleanup(patcher.stop)
        return mock

    def _gnome_util_with_sources(self, sources):
        # Prepare the mocked GnomeKeyboardsUtil instance to report `sources`
        # as the currently configured input sources.
        util = self.mockGnomeKeyboardsUtilClass.return_value
        util.read_input_sources.return_value = sources
        return util

    def test_InstallKeyboardsToIbus_NoIbus(self):
        # When no ibus bus is available, ibus must not be restarted.
        self.mockGetIbusBus.return_value = None

        install_keyboards_to_ibus([], None)

        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToIbus_SingleKbNoLanguages(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus([{'id': 'foo1'}], 'fooDir')

        self.mockInstallToIbus.assert_called_once_with(ANY, 'fooDir/foo1.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToIbus_MultipleKbsNoLanguages(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus([{'id': 'foo1'}, {'id': 'foo2'}], 'fooDir')

        self.mockInstallToIbus.assert_any_call(ANY, 'fooDir/foo1.kmx')
        self.mockInstallToIbus.assert_any_call(ANY, 'fooDir/foo2.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToIbus_SingleKbSingleLanguage(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus(
            [{'id': 'foo1', 'languages': [{'id': 'en'}]}], 'fooDir')

        self.mockInstallToIbus.assert_called_once_with(ANY, 'en:fooDir/foo1.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToIbus_SingleKbMultipleLanguages(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir')

        # Only the first language should be installed.
        self.mockInstallToIbus.assert_called_once()
        self.mockInstallToIbus.assert_called_with(ANY, 'en:fooDir/foo1.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToIbus_SingleKbMultipleLanguages_GivenLanguage(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir', 'fr')

        # The explicitly requested language wins over the keyboard's first.
        self.mockInstallToIbus.assert_called_once()
        self.mockInstallToIbus.assert_called_with(ANY, 'fr:fooDir/foo1.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToIbus_SingleKbMultipleLanguages_OtherLanguage(self):
        bus = self.mockGetIbusBus.return_value

        install_keyboards_to_ibus(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir', 'de')

        # A requested language outside the keyboard's list is still used.
        self.mockInstallToIbus.assert_called_once()
        self.mockInstallToIbus.assert_called_with(ANY, 'de:fooDir/foo1.kmx')
        self.mockRestartIbus.assert_called_once()
        bus.destroy.assert_called_once()

    def test_InstallKeyboardsToGnome_SingleKbNoLanguages(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome([{'id': 'foo1'}], 'fooDir')

        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'fooDir/foo1.kmx')])
        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToGnome_MultipleKbsNoLanguages(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome([{'id': 'foo1'}, {'id': 'foo2'}], 'fooDir')

        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'fooDir/foo1.kmx'), ('ibus', 'fooDir/foo2.kmx')])
        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToGnome_SingleKbSingleLanguage(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome(
            [{'id': 'foo1', 'languages': [{'id': 'en'}]}], 'fooDir')

        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'en:fooDir/foo1.kmx')])
        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToGnome_SingleKbMultipleLanguages(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir')

        # Only the first language should be added.
        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'en:fooDir/foo1.kmx')])
        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToGnome_SingleKbMultipleLanguages_GivenLanguage(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir', 'fr')

        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'fr:fooDir/foo1.kmx')])
        self.mockRestartIbus.assert_not_called()

    def test_InstallKeyboardsToGnome_SingleKbMultipleLanguages_OtherLanguage(self):
        util = self._gnome_util_with_sources([('xkb', 'en')])

        install_keyboards_to_gnome(
            [{'id': 'foo1', 'languages': [{'id': 'en'}, {'id': 'fr'}]}],
            'fooDir', 'de')

        util.write_input_sources.assert_called_once_with(
            [('xkb', 'en'), ('ibus', 'de:fooDir/foo1.kmx')])
        self.mockRestartIbus.assert_not_called()
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 45.238889
| 91
| 0.679602
| 784
| 8,143
| 6.767857
| 0.09949
| 0.058801
| 0.069355
| 0.044855
| 0.838862
| 0.812288
| 0.80833
| 0.789107
| 0.779118
| 0.764229
| 0
| 0.006753
| 0.199804
| 8,143
| 179
| 92
| 45.49162
| 0.807551
| 0.062508
| 0
| 0.581197
| 0
| 0
| 0.103148
| 0.021209
| 0
| 0
| 0
| 0
| 0.299145
| 1
| 0.119658
| false
| 0
| 0.025641
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
604c0beee54cd72b8932533923ae2b68c32e45a7
| 179,880
|
py
|
Python
|
pynos/versions/base/interface.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 12
|
2015-09-21T23:56:09.000Z
|
2018-03-30T04:35:32.000Z
|
pynos/versions/base/interface.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 10
|
2016-09-15T19:03:27.000Z
|
2017-07-17T23:38:01.000Z
|
pynos/versions/base/interface.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 6
|
2015-08-14T08:05:23.000Z
|
2022-02-03T15:33:54.000Z
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
import xml.etree.ElementTree as ET
from ipaddress import ip_interface
import pynos.utilities
from pynos.exceptions import InvalidVlanId
from pynos.versions.base.yang.brocade_interface import brocade_interface
from pynos.versions.base.yang.brocade_rbridge import brocade_rbridge
from pynos.versions.base.yang.brocade_mac_address_table \
import brocade_mac_address_table
from pynos.versions.base.yang.brocade_tunnels import brocade_tunnels
class Interface(object):
    """
    The Interface class holds all the actions associated with the Interfaces
    of a NOS device.

    Attributes:
        None
    """
def __init__(self, callback):
    """
    Interface init function.

    Args:
        callback: Callback function that will be called for each action.

    Returns:
        Interface Object

    Raises:
        None
    """
    self._callback = callback
    # Every generated brocade binding shares the same XML-returning callback.
    xml_callback = pynos.utilities.return_xml
    self._interface = brocade_interface(callback=xml_callback)
    self._rbridge = brocade_rbridge(callback=xml_callback)
    self._mac_address_table = brocade_mac_address_table(callback=xml_callback)
    self._tunnels = brocade_tunnels(callback=xml_callback)
def add_vlan_int(self, vlan_id):
    """
    Add VLAN Interface. VLAN interfaces are required for VLANs even when
    not wanting to use the interface for any L3 features.

    Args:
        vlan_id: ID for the VLAN interface being created. Value of 2-4096.

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    namespace = "urn:brocade.com:mgmt:brocade-interface"
    config = ET.Element('config')
    vlan_interface = ET.SubElement(config, 'interface-vlan', xmlns=namespace)
    vlan = ET.SubElement(ET.SubElement(vlan_interface, 'interface'), 'vlan')
    ET.SubElement(vlan, 'name').text = vlan_id
    try:
        self._callback(config)
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
    return True
def del_vlan_int(self, vlan_id):
    """
    Delete VLAN Interface.

    Args:
        vlan_id: ID for the VLAN interface being created. Value of 2-4096.

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    namespace = "urn:brocade.com:mgmt:brocade-interface"
    config = ET.Element('config')
    vlan_interface = ET.SubElement(config, 'interface-vlan', xmlns=namespace)
    interface = ET.SubElement(vlan_interface, 'interface')
    # operation='delete' marks the vlan node for removal on the device.
    vlan = ET.SubElement(interface, 'vlan', operation='delete')
    ET.SubElement(vlan, 'name').text = vlan_id
    try:
        self._callback(config)
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
    return True
def enable_switchport(self, inter_type, inter):
    """
    Change an interface's operation to L2.

    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    namespace = "urn:brocade.com:mgmt:brocade-interface"
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface', xmlns=namespace)
    int_type = ET.SubElement(interface, inter_type)
    ET.SubElement(int_type, 'name').text = inter
    # An empty switchport-basic/basic node enables L2 operation.
    basic_parent = ET.SubElement(int_type, 'switchport-basic')
    ET.SubElement(basic_parent, 'basic')
    try:
        self._callback(config)
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
    return True
def disable_switchport(self, inter_type, inter):
    """
    Change an interface's operation to L3.

    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    namespace = "urn:brocade.com:mgmt:brocade-interface"
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface', xmlns=namespace)
    int_type = ET.SubElement(interface, inter_type)
    ET.SubElement(int_type, 'name').text = inter
    # Deleting the switchport-basic node returns the port to L3 operation.
    ET.SubElement(int_type, 'switchport-basic', operation='delete')
    try:
        self._callback(config)
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
    return True
def access_vlan(self, inter_type, inter, vlan_id):
    """
    Add a L2 Interface to a specific VLAN.
    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
    Returns:
        True if command completes successfully or False if not.
    Raises:
        None
    """
    config = ET.Element('config')
    iface = ET.SubElement(config, 'interface',
                          xmlns=("urn:brocade.com:mgmt:"
                                 "brocade-interface"))
    phys = ET.SubElement(iface, inter_type)
    ET.SubElement(phys, 'name').text = inter
    sw = ET.SubElement(phys, 'switchport')
    acc = ET.SubElement(sw, 'access')
    ET.SubElement(acc, 'accessvlan').text = vlan_id
    # TODO add logging and narrow exception window.
    try:
        self._callback(config)
    except Exception as error:
        logging.error(error)
        return False
    return True
def del_access_vlan(self, inter_type, inter, vlan_id):
    """
    Remove a L2 Interface from a specific VLAN, placing it back into the
    default VLAN.
    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
    Returns:
        True if command completes successfully or False if not.
    Raises:
        None
    """
    config = ET.Element('config')
    iface = ET.SubElement(config, 'interface',
                          xmlns=("urn:brocade.com:mgmt:"
                                 "brocade-interface"))
    phys = ET.SubElement(iface, inter_type)
    ET.SubElement(phys, 'name').text = inter
    sw = ET.SubElement(phys, 'switchport')
    acc = ET.SubElement(sw, 'access')
    # Delete the accessvlan leaf; the VLAN id is still carried in the text.
    removal = ET.SubElement(acc, 'accessvlan', operation='delete')
    removal.text = vlan_id
    # TODO add logging and narrow exception window.
    try:
        self._callback(config)
    except Exception as error:
        logging.error(error)
        return False
    return True
def set_ip(self, inter_type, inter, ip_addr):
    """
    Set IP address of a L3 interface.
    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        ip_addr: IP Address in <prefix>/<bits> format. Ex: 10.10.10.1/24
    Returns:
        True if command completes successfully or False if not.
    Raises:
        None
    """
    config = ET.Element('config')
    iface = ET.SubElement(config, 'interface',
                          xmlns=("urn:brocade.com:mgmt:"
                                 "brocade-interface"))
    phys = ET.SubElement(iface, inter_type)
    ET.SubElement(phys, 'name').text = inter
    ip_node = ET.SubElement(phys, 'ip')
    # The ip-config subtree lives in its own Brocade namespace.
    ip_conf = ET.SubElement(
        ip_node, 'ip-config',
        xmlns="urn:brocade.com:mgmt:brocade-ip-config"
    )
    addr_node = ET.SubElement(ip_conf, 'address')
    ET.SubElement(addr_node, 'address').text = ip_addr
    # TODO add logging and narrow exception window.
    try:
        self._callback(config)
    except Exception as error:
        logging.error(error)
        return False
    return True
def remove_port_channel(self, **kwargs):
    """
    Remove a port channel interface.
    Args:
        port_int (str): port-channel number (1, 2, 3, etc).
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `port_int` is not passed.
        ValueError: if `port_int` is invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.channel_group(name='225/0/20',
        ...         int_type='tengigabitethernet',
        ...         port_int='1', channel_type='standard', mode='active')
        ...         output = dev.interface.remove_port_channel(
        ...         port_int='1')
    """
    port_int = kwargs.pop('port_int')
    callback = kwargs.pop('callback', self._callback)
    # Port-channel ids are plain 1-4 digit numbers.
    if not re.search('^[0-9]{1,4}$', port_int):
        raise ValueError('%s must be in the format of x for port channel '
                         'interfaces.' % repr(port_int))
    builder = self._interface.interface_port_channel_name
    config = builder(name=port_int)
    # Mark the generated port-channel node for deletion.
    config.find('.//*port-channel').set('operation', 'delete')
    return callback(config)
def ip_address(self, **kwargs):
    """
    Set IP Address on an Interface.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet etc).
        name (str): Name of interface id.
            (For interface: 1/0/5, 1/0/10 etc).
        ip_addr (str): IPv4/IPv6 Virtual IP Address..
            Ex: 10.10.10.1/24 or 2001:db8::/48
        delete (bool): True is the IP address is added and False if its to
            be deleted (True, False). Default value will be False if not
            specified.
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, or `ip_addr` is not passed.
        ValueError: if `int_type`, `name`, or `ip_addr` are invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         int_type = 'tengigabitethernet'
        ...         name = '225/0/4'
        ...         ip_addr = '20.10.10.1/24'
        ...         output = dev.interface.disable_switchport(inter_type=
        ...         int_type, inter=name)
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr)
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr, delete=True)
        ...         output = dev.interface.add_vlan_int('86')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='86', ip_addr=ip_addr, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='86', ip_addr=ip_addr, delete=True,
        ...         rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...         name='225', ip_addr='10.225.225.225/32',
        ...         rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...         name='225', ip_addr='10.225.225.225/32', delete=True,
        ...         rbridge_id='225')
        ...         ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr)
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr, delete=True)
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='86', ip_addr=ip_addr, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='86', ip_addr=ip_addr, delete=True,
        ...         rbridge_id='225')
    """
    int_type = str(kwargs.pop('int_type').lower())
    name = str(kwargs.pop('name'))
    ip_addr = str(kwargs.pop('ip_addr'))
    delete = kwargs.pop('delete', False)
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet', 've',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'loopback']
    if int_type not in valid_int_types:
        raise ValueError('int_type must be one of: %s' %
                         repr(valid_int_types))
    # NOTE: `unicode` is Python 2 only; ip_interface requires a unicode
    # string there.  Parsing also validates the address format.
    ipaddress = ip_interface(unicode(ip_addr))
    ip_args = dict(name=name, address=ip_addr)
    method_name = None
    method_class = self._interface
    # Pick the generated binding for the address family first; the
    # int_type-specific branches below may further prefix/replace it.
    if ipaddress.version == 4:
        method_name = 'interface_%s_ip_ip_config_address_' \
                      'address' % int_type
    elif ipaddress.version == 6:
        method_name = 'interface_%s_ipv6_ipv6_config_address_ipv6_' \
                      'address_address' % int_type
    if int_type == 've':
        # ve interfaces are configured under rbridge-id scope.
        method_name = "rbridge_id_%s" % method_name
        method_class = self._rbridge
        ip_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif int_type == 'loopback':
        # loopback uses fixed binding names (not the %s-formatted ones)
        # and identifies the interface via `id` instead of `name`.
        method_name = 'rbridge_id_interface_loopback_ip_ip_config_' \
                      'address_address'
        if ipaddress.version == 6:
            method_name = 'rbridge_id_interface_loopback_ipv6_ipv6_' \
                          'config_address_ipv6_address_address'
        method_class = self._rbridge
        ip_args['rbridge_id'] = rbridge_id
        ip_args['id'] = name
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces.')
    ip_address_attr = getattr(method_class, method_name)
    config = ip_address_attr(**ip_args)
    if delete:
        # Flag the generated address node for removal.
        config.find('.//*address').set('operation', 'delete')
    try:
        if kwargs.pop('get', False):
            return callback(config, handler='get_config')
        else:
            return callback(config)
    # TODO Setting IP on port channel is not done yet.
    except AttributeError:
        return None
def get_ip_addresses(self, **kwargs):
    """
    Get IP Addresses already set on an Interface.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet etc).
        name (str): Name of interface id.
            (For interface: 1/0/5, 1/0/10 etc).
        version (int): 4 or 6 to represent IPv4 or IPv6 address
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        List of 0 or more IPs configure on the specified interface.
    Raises:
        KeyError: if `int_type` or `name` is not passed.
        ValueError: if `int_type`, `name`, or `version` are invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         int_type = 'tengigabitethernet'
        ...         name = '225/0/4'
        ...         ip_addr = '20.10.10.1/24'
        ...         version = 4
        ...         output = dev.interface.disable_switchport(inter_type=
        ...         int_type, inter=name)
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr)
        ...         result = dev.interface.get_ip_addresses(
        ...         int_type=int_type, name=name, version=version)
        ...         assert len(result) >= 1
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr, delete=True)
        ...         ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
        ...         version = 6
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr)
        ...         result = dev.interface.get_ip_addresses(
        ...         int_type=int_type, name=name, version=version)
        ...         assert len(result) >= 1
        ...         output = dev.interface.ip_address(int_type=int_type,
        ...         name=name, ip_addr=ip_addr, delete=True)
    """
    int_type = str(kwargs.pop('int_type').lower())
    name = str(kwargs.pop('name'))
    version = int(kwargs.pop('version'))
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet']
    if int_type not in valid_int_types:
        raise ValueError('int_type must be one of: %s' %
                         repr(valid_int_types))
    # BUG FIX: previously an invalid version left method_name as None and
    # surfaced later as an unhelpful TypeError from getattr(); fail fast
    # with a clear ValueError instead.
    if version not in (4, 6):
        raise ValueError('version must be 4 or 6, not %s' % repr(version))
    method_class = self._interface
    # Select the generated binding for the requested address family.
    if version == 4:
        method_name = 'interface_%s_ip_ip_config_address_' \
                      'address' % int_type
    else:
        method_name = 'interface_%s_ipv6_ipv6_config_address_ipv6_' \
                      'address_address' % int_type
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces.')
    # Empty address acts as a wildcard filter for the get request.
    ip_args = dict(name=name, address='')
    ip_address_attr = getattr(method_class, method_name)
    config = ip_address_attr(**ip_args)
    output = callback(config, handler='get_config')
    result = []
    # {*} matches any XML namespace in the reply.
    if version == 4:
        for item in output.data.findall('.//{*}address/{*}address'):
            result.append(item.text)
    else:
        for item in output.data.findall(
                './/{*}address/{*}ipv6-address/{*}address'):
            result.append(item.text)
    return result
    # TODO Getting IP's from ve and vlan is not done yet.
def del_ip(self, inter_type, inter, ip_addr):
    """
    Delete IP address from a L3 interface.
    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        ip_addr: IP Address in <prefix>/<bits> format. Ex: 10.10.10.1/24
    Returns:
        True if command completes successfully or False if not.
    Raises:
        None
    """
    config = ET.Element('config')
    iface = ET.SubElement(config, 'interface',
                          xmlns=("urn:brocade.com:mgmt:"
                                 "brocade-interface"))
    phys = ET.SubElement(iface, inter_type)
    ET.SubElement(phys, 'name').text = inter
    ip_node = ET.SubElement(phys, 'ip')
    # The ip-config subtree lives in its own Brocade namespace.
    ip_conf = ET.SubElement(
        ip_node, 'ip-config',
        xmlns="urn:brocade.com:mgmt:brocade-ip-config"
    )
    # The address container is marked for deletion; the leaf carries
    # the address being removed.
    addr_node = ET.SubElement(ip_conf, 'address', operation='delete')
    ET.SubElement(addr_node, 'address').text = ip_addr
    # TODO add logging and narrow exception window.
    try:
        self._callback(config)
    except Exception as error:
        logging.error(error)
        return False
    return True
def description(self, **kwargs):
    """Set interface description.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        desc (str): The description of the interface.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, or `desc` is not specified.
        ValueError: if `name`, `int_type`, or `desc` is not a valid
            value.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.description(
        ...         int_type='tengigabitethernet',
        ...         name='225/0/38',
        ...         desc='test')
        ...         dev.interface.description()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    int_type = str(kwargs.pop('int_type').lower())
    name = str(kwargs.pop('name'))
    desc = str(kwargs.pop('desc'))
    callback = kwargs.pop('callback', self._callback)
    int_types = [
        'gigabitethernet',
        'tengigabitethernet',
        'fortygigabitethernet',
        'hundredgigabitethernet',
        'port_channel',
        'vlan'
    ]
    if int_type not in int_types:
        raise ValueError("`int_type` must be one of: %s" % repr(int_types))
    desc_args = dict(name=name, description=desc)
    if int_type == "vlan":
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
        builder = self._interface.interface_vlan_interface_vlan_description
    else:
        if not pynos.utilities.valid_interface(int_type, name):
            raise ValueError('`name` must be in the format of x/y/z for '
                             'physical interfaces or x for port channel.')
        builder = getattr(self._interface,
                          'interface_%s_description' % int_type)
    return callback(builder(**desc_args))
def private_vlan_type(self, **kwargs):
    """Set the PVLAN type (primary, isolated, community).
    Args:
        name (str): VLAN ID.
        pvlan_type (str): PVLAN type (primary, isolated, community)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `name` or `pvlan_type` is not specified.
        ValueError: if `name` or `pvlan_type` is invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> name = '90'
        >>> pvlan_type = 'isolated'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.private_vlan_type(name=name,
        ...         pvlan_type=pvlan_type)
        ...         dev.interface.private_vlan_type()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    name = kwargs.pop('name')
    pvlan_type = kwargs.pop('pvlan_type')
    callback = kwargs.pop('callback', self._callback)
    if not pynos.utilities.valid_vlan_id(name):
        raise InvalidVlanId("Incorrect name value.")
    if pvlan_type not in ('isolated', 'primary', 'community'):
        raise ValueError("Incorrect pvlan_type")
    # Build the request via the generated binding for the pvlan type leaf.
    builder = getattr(self._interface,
                      'interface_vlan_interface_vlan_'
                      'private_vlan_pvlan_type_leaf')
    config = builder(name=name, pvlan_type_leaf=pvlan_type)
    return callback(config)
def vlan_pvlan_association_add(self, **kwargs):
    """Add a secondary PVLAN to a primary PVLAN.
    Args:
        name (str): VLAN number (1-4094).
        sec_vlan (str): The secondary PVLAN.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `name` or `sec_vlan` is not specified.
        ValueError: if `name` or `sec_vlan` is invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> int_type = 'tengigabitethernet'
        >>> name = '20'
        >>> sec_vlan = '30'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.private_vlan_type(name=name,
        ...         pvlan_type='primary')
        ...         output = dev.interface.private_vlan_type(name=sec_vlan,
        ...         pvlan_type='isolated')
        ...         output = dev.interface.vlan_pvlan_association_add(
        ...         name=name, sec_vlan=sec_vlan)
        ...         dev.interface.vlan_pvlan_association_add()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    name = kwargs.pop('name')
    sec_vlan = kwargs.pop('sec_vlan')
    callback = kwargs.pop('callback', self._callback)
    if not pynos.utilities.valid_vlan_id(name):
        raise InvalidVlanId("Incorrect name value.")
    if not pynos.utilities.valid_vlan_id(sec_vlan):
        raise InvalidVlanId("`sec_vlan` must be between `1` and `8191`.")
    # Build the request via the generated binding for the association leaf.
    builder = getattr(self._interface,
                      'interface_vlan_interface_vlan_'
                      'private_vlan_association_sec_assoc_add')
    config = builder(name=name, sec_assoc_add=sec_vlan)
    return callback(config)
def pvlan_host_association(self, **kwargs):
    """Set interface PVLAN association.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        pri_vlan (str): The primary PVLAN.
        sec_vlan (str): The secondary PVLAN.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is not
            specified.
        ValueError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan`
            is invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> int_type = 'tengigabitethernet'
        >>> name = '225/0/38'
        >>> pri_vlan = '75'
        >>> sec_vlan = '100'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.private_vlan_type(name=pri_vlan,
        ...         pvlan_type='primary')
        ...         output = dev.interface.private_vlan_type(name=sec_vlan,
        ...         pvlan_type='isolated')
        ...         output = dev.interface.vlan_pvlan_association_add(
        ...         name=pri_vlan, sec_vlan=sec_vlan)
        ...         output = dev.interface.enable_switchport(int_type,
        ...         name)
        ...         output = dev.interface.private_vlan_mode(
        ...         int_type=int_type, name=name, mode='host')
        ...         output = dev.interface.pvlan_host_association(
        ...         int_type=int_type, name=name, pri_vlan=pri_vlan,
        ...         sec_vlan=sec_vlan)
        ...         dev.interface.pvlan_host_association()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    pri_vlan = kwargs.pop('pri_vlan')
    sec_vlan = kwargs.pop('sec_vlan')
    callback = kwargs.pop('callback', self._callback)
    int_types = ['gigabitethernet', 'tengigabitethernet',
                 'fortygigabitethernet', 'hundredgigabitethernet',
                 'port_channel']
    if int_type not in int_types:
        raise ValueError("Incorrect int_type value.")
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    if not pynos.utilities.valid_vlan_id(pri_vlan):
        # BUG FIX: this message previously referred to `sec_vlan`.
        raise InvalidVlanId("`pri_vlan` must be between `1` and `4095`.")
    if not pynos.utilities.valid_vlan_id(sec_vlan):
        raise InvalidVlanId("`sec_vlan` must be between `1` and `4095`.")
    pvlan_args = dict(name=name, host_pri_pvlan=pri_vlan)
    associate_pvlan = getattr(self._interface,
                              'interface_%s_switchport_private_vlan_'
                              'host_association_host_pri_pvlan' %
                              int_type)
    config = associate_pvlan(**pvlan_args)
    # The generated request only carries the primary VLAN; graft the
    # secondary VLAN leaf onto the host-association node by hand.
    sec_assoc = config.find('.//*host-association')
    sec_assoc = ET.SubElement(sec_assoc, 'host-sec-pvlan')
    sec_assoc.text = sec_vlan
    return callback(config)
def admin_state(self, **kwargs):
    """Set interface administrative state.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        enabled (bool): Is the interface enabled? (True, False)
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, or `enabled` is not passed and
            `get` is not ``True``.
        ValueError: if `int_type`, `name`, or `enabled` are invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         dev.interface.admin_state(
        ...         int_type='tengigabitethernet', name='225/0/38',
        ...         enabled=False)
        ...         dev.interface.admin_state(
        ...         int_type='tengigabitethernet', name='225/0/38',
        ...         enabled=True)
        ...         output = dev.interface.add_vlan_int('87')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='87', ip_addr='10.0.0.1/24', rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='ve',
        ...         name='87', enabled=True, rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='ve',
        ...         name='87', enabled=False, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...         name='225', ip_addr='10.225.225.225/32',
        ...         rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='loopback',
        ...         name='225', enabled=True, rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='loopback',
        ...         name='225', enabled=False, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...         name='225', ip_addr='10.225.225.225/32',
        ...         rbridge_id='225', delete=True)
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    get = kwargs.pop('get', False)
    # `get` requests read the current config, so no `enabled` value is
    # required (or popped) in that case.
    if get:
        enabled = None
    else:
        enabled = kwargs.pop('enabled')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've', 'loopback']
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    if not isinstance(enabled, bool) and not get:
        raise ValueError('`enabled` must be `True` or `False`.')
    state_args = dict(name=name)
    # Default binding for physical / port-channel interfaces; the ve and
    # loopback branches below swap in rbridge-scoped bindings.
    method_name = 'interface_%s_shutdown' % int_type
    method_class = self._interface
    if int_type == 've':
        method_name = "rbridge_id_%s" % method_name
        method_class = self._rbridge
        state_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif int_type == 'loopback':
        # Loopback uses `id` (not `name`) to identify the interface.
        method_name = 'rbridge_id_interface_{0}_intf_' \
                      '{0}_shutdown'.format(int_type)
        method_class = self._rbridge
        state_args['rbridge_id'] = rbridge_id
        state_args['id'] = name
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    admin_state = getattr(method_class, method_name)
    config = admin_state(**state_args)
    # Enabling an interface means deleting its <shutdown> node; when
    # `get` is True, `enabled` is None and the config is left untouched.
    if enabled:
        config.find('.//*shutdown').set('operation', 'delete')
    try:
        if get:
            return callback(config, handler='get_config')
        else:
            return callback(config)
    # TODO: Catch existing 'no shut'
    # This is in place because if the interface is already admin up,
    # `ncclient` will raise an error if you try to admin up the interface
    # again.
    except AttributeError:
        return None
def pvlan_trunk_association(self, **kwargs):
    """Set switchport private vlan host association.

    NOTE(review): unimplemented placeholder — calling this is a silent
    no-op that returns ``None``. All keyword arguments are ignored.
    Args:
    Returns:
    Raises:
    Examples:
    """
    pass
def trunk_allowed_vlan(self, **kwargs):
    """Modify allowed VLANs on Trunk (add, remove, none, all).
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        action (str): Action to take on trunk. (add, remove, none, all)
        get (bool): Get config instead of editing config. (True, False)
        vlan (str): vlan id for action. Only valid for add and remove.
        ctag (str): ctag range. Only valid for add and remove.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, or `mode` is not specified.
        ValueError: if `int_type`, `name`, or `mode` is invalid.
    Examples:
        >>> # Skip due to current dev work
        >>> # TODO: Reenable after dev work
        >>> def test_trunk_allowed_vlan():
        ...     import pynos.device
        ...     switches = ['10.24.39.212', '10.24.39.202']
        ...     auth = ('admin', 'password')
        ...     int_type = 'tengigabitethernet'
        ...     name = '226/0/4'
        ...     for switch in switches:
        ...         conn = (switch, '22')
        ...         with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...             output = dev.interface.enable_switchport(int_type,
        ...             name)
        ...             output = dev.interface.trunk_mode(name=name,
        ...             int_type=int_type, mode='trunk')
        ...             output = dev.interface.add_vlan_int('25')
        ...             output = dev.interface.add_vlan_int('8000')
        ...             output = dev.interface.trunk_allowed_vlan(
        ...             int_type=int_type, name=name, action='add',
        ...             ctag='25', vlan='8000')
        ...             dev.interface.private_vlan_mode()
        ...             # doctest: +IGNORE_EXCEPTION_DETAIL
        >>> test_trunk_allowed_vlan()  # doctest: +SKIP
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    action = kwargs.pop('action')
    ctag = kwargs.pop('ctag', None)
    vlan = kwargs.pop('vlan', None)
    callback = kwargs.pop('callback', self._callback)
    int_types = ['gigabitethernet', 'tengigabitethernet',
                 'fortygigabitethernet', 'hundredgigabitethernet',
                 'port_channel']
    valid_actions = ['add', 'remove', 'none', 'all']
    if int_type not in int_types:
        raise ValueError("`int_type` must be one of: %s" %
                         repr(int_types))
    if action not in valid_actions:
        raise ValueError('%s must be one of: %s' % (action, valid_actions))
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    # A ctag range only makes sense together with a vlan and only for
    # the add/remove actions.
    ctag_actions = ['add', 'remove']
    if ctag and not vlan:
        raise ValueError('vlan must be set when ctag is set ')
    if ctag and action not in ctag_actions:
        raise ValueError('%s must be in %s when %s is set '
                         % (repr(action),
                            repr(ctag_actions),
                            repr(ctag)))
    if ctag:
        attr_name = ('interface_%s_switchport_trunk_trunk_vlan_'
                     'classification_allowed_vlan_%s_trunk_'
                     'ctag_range' % (int_type, action))
    else:
        attr_name = ('interface_%s_switchport_trunk_'
                     'allowed_vlan_%s' % (int_type, action))
    allowed_vlan = getattr(self._interface, attr_name)
    # The generated binding ignores the keywords it does not need.
    config = allowed_vlan(name=name,
                          add=vlan,
                          remove=vlan,
                          trunk_vlan_id=vlan,
                          trunk_ctag_range=ctag)
    return callback(config)
def private_vlan_mode(self, **kwargs):
    """Set PVLAN mode (promiscuous, host, trunk).
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        mode (str): The switchport PVLAN mode.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Returns:
        Return value of `callback`.
    Raises:
        KeyError: if `int_type`, `name`, or `mode` is not specified.
        ValueError: if `int_type`, `name`, or `mode` is invalid.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> int_type = 'tengigabitethernet'
        >>> name = '225/0/38'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.enable_switchport(int_type,
        ...         name)
        ...         output = dev.interface.private_vlan_mode(
        ...         int_type=int_type, name=name, mode='trunk_host')
        ...         dev.interface.private_vlan_mode()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    mode = kwargs.pop('mode').lower()
    callback = kwargs.pop('callback', self._callback)
    int_types = ['gigabitethernet', 'tengigabitethernet',
                 'fortygigabitethernet', 'hundredgigabitethernet',
                 'port_channel']
    valid_modes = ['host', 'promiscuous', 'trunk_host',
                   'trunk_basic', 'trunk_promiscuous']
    if int_type not in int_types:
        raise ValueError("Incorrect int_type value.")
    if mode not in valid_modes:
        raise ValueError('%s must be one of: %s' % (mode, valid_modes))
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    # Trunk modes use a longer generated binding name.
    if 'trunk' in mode:
        attr_name = ('interface_%s_switchport_mode_'
                     'private_vlan_private_vlan_trunk_%s'
                     % (int_type, mode))
    else:
        attr_name = ('interface_%s_switchport_mode_'
                     'private_vlan_%s' % (int_type, mode))
    pvlan_mode = getattr(self._interface, attr_name)
    return callback(pvlan_mode(name=name))
    def spanning_tree_state(self, **kwargs):
        """Set Spanning Tree state.
        Args:
            int_type (str): Type of interface. (gigabitethernet,
                tengigabitethernet, vlan, port_channel etc).
            name (str): Name of interface or VLAN id.
                (For interface: 1/0/5, 1/0/10 etc).
                (For VLANs 0, 1, 100 etc).
                (For Port Channels 1, 100 etc).
            enabled (bool): Is Spanning Tree enabled? (True, False)
            callback (function): A function executed upon completion of the
                method.  The only parameter passed to `callback` will be the
                ``ElementTree`` `config`.
        Returns:
            Return value of `callback`.  Returns ``None`` when `callback`
            raises ``AttributeError`` (see the note above the except
            clause below).
        Raises:
            KeyError: if `int_type`, `name`, or `enabled` is not passed.
            ValueError: if `int_type`, `name`, or `enabled` are invalid.
            InvalidVlanId: if `int_type` is `vlan` and `name` is not a
                valid VLAN id.
        Examples:
            >>> import pynos.device
            >>> switches = ['10.24.39.211', '10.24.39.203']
            >>> auth = ('admin', 'password')
            >>> for switch in switches:
            ...     conn = (switch, '22')
            ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...         enabled = True
            ...         int_type = 'tengigabitethernet'
            ...         name = '225/0/37'
            ...         output = dev.interface.enable_switchport(int_type,
            ...         name)
            ...         output = dev.interface.spanning_tree_state(
            ...         int_type=int_type, name=name, enabled=enabled)
            ...         enabled = False
            ...         output = dev.interface.spanning_tree_state(
            ...         int_type=int_type, name=name, enabled=enabled)
            ...         int_type = 'vlan'
            ...         name = '102'
            ...         enabled = False
            ...         output = dev.interface.add_vlan_int(name)
            ...         output = dev.interface.enable_switchport(
            ...             int_type, name)
            ...         output = dev.interface.spanning_tree_state(
            ...             int_type=int_type, name=name, enabled=enabled)
            ...         enabled = False
            ...         output = dev.interface.spanning_tree_state(
            ...             int_type=int_type, name=name, enabled=enabled)
            ...         output = dev.interface.del_vlan_int(name)
            ...         int_type = 'port_channel'
            ...         name = '2'
            ...         enabled = False
            ...         output = dev.interface.channel_group(name='225/0/20',
            ...                              int_type='tengigabitethernet',
            ...                              port_int=name,
            ...                              channel_type='standard',
            ...                              mode='active')
            ...         output = dev.interface.enable_switchport(
            ...             int_type, name)
            ...         output = dev.interface.spanning_tree_state(
            ...             int_type=int_type, name=name, enabled=enabled)
            ...         enabled = False
            ...         output = dev.interface.spanning_tree_state(
            ...             int_type=int_type, name=name, enabled=enabled)
            ...         output = dev.interface.remove_port_channel(
            ...             port_int=name)
        """
        int_type = kwargs.pop('int_type').lower()
        name = kwargs.pop('name')
        enabled = kwargs.pop('enabled')
        callback = kwargs.pop('callback', self._callback)
        valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                           'fortygigabitethernet', 'hundredgigabitethernet',
                           'port_channel', 'vlan']
        if int_type not in valid_int_types:
            raise ValueError('int_type must be one of: %s' %
                             repr(valid_int_types))
        if not isinstance(enabled, bool):
            raise ValueError('%s must be `True` or `False`.' % repr(enabled))
        if int_type == 'vlan':
            # VLANs live under a different generated-XML hierarchy
            # (interface_vlan_interface_vlan_...) than physical interfaces
            # and port-channels, so the method name is built differently.
            if not pynos.utilities.valid_vlan_id(name):
                raise InvalidVlanId('%s must be between 0 to 8191.' % int_type)
            state_args = dict(name=name)
            spanning_tree_state = getattr(self._interface,
                                          'interface_%s_interface_%s_spanning_'
                                          'tree_stp_shutdown' % (int_type,
                                                                 int_type))
        else:
            if not pynos.utilities.valid_interface(int_type, name):
                raise ValueError('`name` must be in the format of x/y/z for '
                                 'physical interfaces or x for port channel.')
            state_args = dict(name=name)
            spanning_tree_state = getattr(self._interface,
                                          'interface_%s_spanning_tree_'
                                          'shutdown' % int_type)
        config = spanning_tree_state(**state_args)
        if enabled:
            # The generated method produces a "shutdown" (STP disabled)
            # element; enabling STP is expressed by deleting that element.
            if int_type == 'vlan':
                shutdown = config.find('.//*stp-shutdown')
            else:
                shutdown = config.find('.//*shutdown')
            shutdown.set('operation', 'delete')
        try:
            return callback(config)
        # TODO: Catch existing 'no shut'
        # This is in place because if the interface spanning tree is already
        # up,`ncclient` will raise an error if you try to admin up the
        # interface again.
        # TODO: add logic to shutdown STP at protocol level too.
        except AttributeError:
            return None
    def tag_native_vlan(self, **kwargs):
        """Set tagging of native VLAN on trunk.
        Args:
            int_type (str): Type of interface. (gigabitethernet,
                tengigabitethernet, etc)
            name (str): Name of interface. (1/0/5, 1/0/10, etc)
            mode (str): Trunk port mode (trunk, trunk-no-default-native).
            enabled (bool): Is tagging of the VLAN enabled on trunks?
                (True, False).  Defaults to ``True``.
            callback (function): A function executed upon completion of the
                method.  The only parameter passed to `callback` will be the
                ``ElementTree`` `config`.
        Returns:
            Return value of `callback`.  Returns ``None`` when `callback`
            raises ``AttributeError`` (e.g. tagging already disabled).
        Raises:
            KeyError: if `int_type`, `name`, or `state` is not specified.
            ValueError: if `int_type`, `name`, or `state` is not valid.
        Examples:
            >>> import pynos.device
            >>> switches = ['10.24.39.211', '10.24.39.203']
            >>> auth = ('admin', 'password')
            >>> for switch in switches:
            ...     conn = (switch, '22')
            ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...         output = dev.interface.trunk_mode(
            ...         int_type='tengigabitethernet',
            ...         name='225/0/38', mode='trunk')
            ...         output = dev.interface.tag_native_vlan(name='225/0/38',
            ...         int_type='tengigabitethernet')
            ...         output = dev.interface.tag_native_vlan(
            ...             int_type='tengigabitethernet',
            ...             name='225/0/38', enabled=False)
            ...         dev.interface.tag_native_vlan()
            ...         # doctest: +IGNORE_EXCEPTION_DETAIL
            Traceback (most recent call last):
            KeyError
        """
        int_type = kwargs.pop('int_type').lower()
        name = kwargs.pop('name')
        enabled = kwargs.pop('enabled', True)
        callback = kwargs.pop('callback', self._callback)
        int_types = ['gigabitethernet', 'tengigabitethernet',
                     'fortygigabitethernet', 'hundredgigabitethernet',
                     'port_channel']
        if int_type not in int_types:
            raise ValueError("Incorrect int_type value.")
        if not pynos.utilities.valid_interface(int_type, name):
            raise ValueError('`name` must be in the format of x/y/z for '
                             'physical interfaces or x for port channel.')
        if not isinstance(enabled, bool):
            raise ValueError("Invalid state.")
        tag_args = dict(name=name)
        tag_native_vlan = getattr(self._interface, 'interface_%s_switchport_'
                                  'trunk_tag_native_vlan' % int_type)
        config = tag_native_vlan(**tag_args)
        if not enabled:
            # Disabling tagging is expressed by deleting the generated
            # native-vlan element from the config payload.
            untag = config.find('.//*native-vlan')
            untag.set('operation', 'delete')
        try:
            return callback(config)
        # TODO: Catch existing 'no switchport tag native-vlan'
        except AttributeError:
            return None
def switchport_pvlan_mapping(self, **kwargs):
"""Switchport private VLAN mapping.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
pri_vlan (str): The primary PVLAN.
sec_vlan (str): The secondary PVLAN.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `mode` is not specified.
ValueError: if `int_type`, `name`, or `mode` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> int_type = 'tengigabitethernet'
>>> name = '225/0/37'
>>> pri_vlan = '3000'
>>> sec_vlan = ['3001', '3002']
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.private_vlan_type(name=pri_vlan,
... pvlan_type='primary')
... output = dev.interface.enable_switchport(int_type,
... name)
... output = dev.interface.private_vlan_mode(
... int_type=int_type, name=name, mode='trunk_promiscuous')
... for spvlan in sec_vlan:
... output = dev.interface.private_vlan_type(
... name=spvlan, pvlan_type='isolated')
... output = dev.interface.vlan_pvlan_association_add(
... name=pri_vlan, sec_vlan=spvlan)
... output = dev.interface.switchport_pvlan_mapping(
... int_type=int_type, name=name, pri_vlan=pri_vlan,
... sec_vlan=spvlan)
... dev.interface.switchport_pvlan_mapping()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
pri_vlan = kwargs.pop('pri_vlan')
sec_vlan = kwargs.pop('sec_vlan')
callback = kwargs.pop('callback', self._callback)
int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel']
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("`name` must be in the format of x/y/x for "
"physical interfaces or x for port channel.")
if not pynos.utilities.valid_vlan_id(pri_vlan, extended=True):
raise InvalidVlanId("`pri_vlan` must be between `1` and `4096`")
if not pynos.utilities.valid_vlan_id(sec_vlan, extended=True):
raise InvalidVlanId("`sec_vlan` must be between `1` and `4096`")
pvlan_args = dict(name=name,
promis_pri_pvlan=pri_vlan,
promis_sec_pvlan_range=sec_vlan)
pvlan_mapping = getattr(self._interface,
'interface_gigabitethernet_switchport_'
'private_vlan_mapping_promis_sec_pvlan_range')
config = pvlan_mapping(**pvlan_args)
return callback(config)
def mtu(self, **kwargs):
"""Set interface mtu.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
mtu (str): Value between 1522 and 9216
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `mtu` is not specified.
ValueError: if `int_type`, `name`, or `mtu` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.mtu(mtu='1666',
... int_type='tengigabitethernet', name='225/0/38')
... dev.interface.mtu() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
mtu = kwargs.pop('mtu')
callback = kwargs.pop('callback', self._callback)
int_types = [
'gigabitethernet',
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet',
'port_channel'
]
if int_type not in int_types:
raise ValueError("Incorrect int_type value.")
minimum_mtu = 1522
maximum_mtu = 9216
if int(mtu) < minimum_mtu or int(mtu) > maximum_mtu:
raise ValueError("Incorrect mtu value 1522-9216")
mtu_args = dict(name=name, mtu=mtu)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
config = getattr(
self._interface,
'interface_%s_mtu' % int_type
)(**mtu_args)
return callback(config)
def ip_mtu(self, **kwargs):
"""Set interface mtu.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
mtu (str): Value between 1300 and 9018
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `mtu` is not specified.
ValueError: if `int_type`, `name`, or `mtu` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_mtu(mtu='1666',
... int_type='tengigabitethernet', name='225/0/38')
... dev.interface.ip_mtu() # doctest:
+IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
mtu = kwargs.pop('mtu')
callback = kwargs.pop('callback', self._callback)
int_types = [
'gigabitethernet',
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet'
]
if int_type not in int_types:
raise ValueError("Incorrect int_type value.")
minimum_mtu = 1300
maximum_mtu = 9018
if int(mtu) < minimum_mtu or int(mtu) > maximum_mtu:
raise ValueError("Incorrect mtu value 1300-9018")
mtu_args = dict(name=name, mtu=mtu)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces.')
config = getattr(
self._interface,
'interface_%s_ip_ip_config_mtu' % int_type
)(**mtu_args)
return callback(config)
def fabric_isl(self, **kwargs):
"""Set fabric ISL state.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
enabled (bool): Is fabric ISL state enabled? (True, False)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `state` is not specified.
ValueError: if `int_type`, `name`, or `state` is not a valid value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.fabric_isl(
... int_type='tengigabitethernet',
... name='225/0/40',
... enabled=False)
... dev.interface.fabric_isl()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = str(kwargs.pop('int_type').lower())
name = str(kwargs.pop('name'))
enabled = kwargs.pop('enabled', True)
callback = kwargs.pop('callback', self._callback)
int_types = [
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet'
]
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" %
repr(int_types))
if not isinstance(enabled, bool):
raise ValueError('`enabled` must be `True` or `False`.')
fabric_isl_args = dict(name=name)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("`name` must match `^[0-9]{1,3}/[0-9]{1,3}/[0-9]"
"{1,3}$`")
config = getattr(
self._interface,
'interface_%s_fabric_fabric_isl_fabric_isl_enable' % int_type
)(**fabric_isl_args)
if not enabled:
fabric_isl = config.find('.//*fabric-isl')
fabric_isl.set('operation', 'delete')
if kwargs.pop('get', False):
return callback(config, handler='get_config')
else:
return callback(config)
def fabric_trunk(self, **kwargs):
"""Set fabric trunk state.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
enabled (bool): Is Fabric trunk enabled? (True, False)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `state` is not specified.
ValueError: if `int_type`, `name`, or `state` is not a valid value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.fabric_trunk(name='225/0/40',
... int_type='tengigabitethernet', enabled=False)
... dev.interface.fabric_trunk()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = str(kwargs.pop('int_type').lower())
name = str(kwargs.pop('name'))
enabled = kwargs.pop('enabled', True)
callback = kwargs.pop('callback', self._callback)
int_types = [
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet'
]
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
if not isinstance(enabled, bool):
raise ValueError('`enabled` must be `True` or `False`.')
fabric_trunk_args = dict(name=name)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("`name` must match `^[0-9]{1,3}/[0-9]{1,3}/[0-9]"
"{1,3}$`")
config = getattr(
self._interface,
'interface_%s_fabric_fabric_trunk_fabric_trunk_enable' % int_type
)(**fabric_trunk_args)
if not enabled:
fabric_trunk = config.find('.//*fabric-trunk')
fabric_trunk.set('operation', 'delete')
if kwargs.pop('get', False):
return callback(config, handler='get_config')
else:
return callback(config)
def v6_nd_suppress_ra(self, **kwargs):
"""Disable IPv6 Router Advertisements
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `rbridge_id` is not specified.
ValueError: if `int_type`, `name`, or `rbridge_id` is not a valid
value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.add_vlan_int('10')
... output = dev.interface.v6_nd_suppress_ra(name='10',
... int_type='ve', rbridge_id='225')
... dev.interface.v6_nd_suppress_ra()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = str(kwargs.pop('int_type').lower())
name = str(kwargs.pop('name'))
callback = kwargs.pop('callback', self._callback)
int_types = [
'gigabitethernet',
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet',
've'
]
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
if int_type == "ve":
if not pynos.utilities.valid_vlan_id(name):
raise ValueError("`name` must be between `1` and `8191`")
rbridge_id = kwargs.pop('rbridge_id', "1")
nd_suppress_args = dict(name=name, rbridge_id=rbridge_id)
nd_suppress = getattr(self._rbridge,
'rbridge_id_interface_ve_ipv6_'
'ipv6_nd_ra_ipv6_intf_cmds_'
'nd_suppress_ra_suppress_ra_all')
config = nd_suppress(**nd_suppress_args)
else:
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("`name` must match "
"`^[0-9]{1,3}/[0-9]{1,3}/[0-9]{1,3}$`")
nd_suppress_args = dict(name=name)
nd_suppress = getattr(self._interface,
'interface_%s_ipv6_ipv6_nd_ra_'
'ipv6_intf_cmds_nd_suppress_ra_'
'suppress_ra_all' % int_type)
config = nd_suppress(**nd_suppress_args)
return callback(config)
def vrrp_vip(self, **kwargs):
"""Set VRRP VIP.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
vrid (str): VRRPv3 ID.
vip (str): IPv4/IPv6 Virtual IP Address.
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed.
ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.anycast_mac(rbridge_id='225',
... mac='aabb.ccdd.eeff', delete=True)
... output = dev.services.vrrp(ip_version='6',
... enabled=True, rbridge_id='225')
... output = dev.services.vrrp(enabled=True,
... rbridge_id='225')
... output = dev.interface.set_ip('tengigabitethernet',
... '225/0/18', '10.1.1.2/24')
... output = dev.interface.ip_address(name='225/0/18',
... int_type='tengigabitethernet',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1', vip='10.1.1.1/24')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='fe80::cafe:beef:1000:1/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='2001:4818:f000:1ab:cafe:beef:1000:1/64')
... output = dev.interface.add_vlan_int('89')
... output = dev.interface.ip_address(name='89',
... int_type='ve', ip_addr='172.16.1.1/24',
... rbridge_id='225')
... output = dev.interface.ip_address(name='89',
... int_type='ve', rbridge_id='225',
... ip_addr='2002:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='172.16.1.2/24', rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='fe80::dafe:beef:1000:1/64',
... rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='2002:4818:f000:1ab:cafe:beef:1000:1/64',
... rbridge_id='225')
... output = dev.services.vrrp(ip_version='6',
... enabled=False, rbridge_id='225')
... output = dev.services.vrrp(enabled=False,
... rbridge_id='225')
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
vrid = kwargs.pop('vrid')
vip = kwargs.pop('vip')
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 've']
ipaddress = ip_interface(unicode(vip))
vrrp_vip = None
vrrp_args = dict(name=name,
vrid=vrid,
virtual_ipaddr=str(ipaddress.ip))
method_class = self._interface
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
if ipaddress.version == 4:
vrrp_args['version'] = '3'
method_name = 'interface_%s_vrrp_virtual_ip_virtual_' \
'ipaddr' % int_type
elif ipaddress.version == 6:
method_name = 'interface_%s_ipv6_vrrpv3_group_virtual_ip_' \
'virtual_ipaddr' % int_type
if int_type == 've':
method_name = 'rbridge_id_%s' % method_name
if ipaddress.version == 6:
method_name = method_name.replace('group_', '')
method_class = self._rbridge
vrrp_args['rbridge_id'] = rbridge_id
if not pynos.utilities.valid_vlan_id(name):
raise InvalidVlanId("`name` must be between `1` and `8191`")
elif not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
vrrp_vip = getattr(method_class, method_name)
config = vrrp_vip(**vrrp_args)
return callback(config)
    def vrrp_state(self, **kwargs):
        """Set VRRP state (enabled, disabled).

        NOTE(review): not implemented -- this is a placeholder kept for API
        parity with the other VRRP helpers (`vrrp_vip`, `vrrp_priority`).
        It accepts any keyword arguments and does nothing.
        """
        pass
    def vrrp_preempt(self, **kwargs):
        """Set VRRP preempt mode (enabled, disabled).

        NOTE(review): not implemented -- this is a placeholder kept for API
        parity with the other VRRP helpers (`vrrp_vip`, `vrrp_priority`).
        It accepts any keyword arguments and does nothing.
        """
        pass
def vrrp_priority(self, **kwargs):
"""Set VRRP priority.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
vrid (str): VRRPv3 ID.
priority (str): VRRP Priority.
ip_version (str): Version of IP (4, 6).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `vrid`, `priority`, or
`ip_version` is not passed.
ValueError: if `int_type`, `name`, `vrid`, `priority`, or
`ip_version` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.anycast_mac(rbridge_id='225',
... mac='aabb.ccdd.eeff', delete=True)
... output = dev.services.vrrp(ip_version='6',
... enabled=True, rbridge_id='225')
... output = dev.services.vrrp(enabled=True,
... rbridge_id='225')
... output = dev.interface.set_ip('tengigabitethernet',
... '225/0/18', '10.1.1.2/24')
... output = dev.interface.ip_address(name='225/0/18',
... int_type='tengigabitethernet',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1', vip='10.1.1.1/24')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='fe80::cafe:beef:1000:1/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='2001:4818:f000:1ab:cafe:beef:1000:1/64')
... dev.interface.vrrp_priority(
... int_type='tengigabitethernet',
... name='225/0/18', vrid='1', ip_version='4',
... priority='66')
... dev.interface.vrrp_priority(
... int_type='tengigabitethernet',
... name='225/0/18', vrid='1', ip_version='6',
... priority='77')
... output = dev.interface.add_vlan_int('88')
... output = dev.interface.ip_address(int_type='ve',
... name='88', ip_addr='172.16.10.1/24', rbridge_id='225')
... output = dev.interface.ip_address(int_type='ve',
... name='88', rbridge_id='225',
... ip_addr='2003:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='ve', name='88',
... vrid='1', vip='172.16.10.2/24', rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='88',
... rbridge_id='225', vrid='1',
... vip='fe80::dafe:beef:1000:1/64')
... dev.interface.vrrp_vip(int_type='ve', rbridge_id='225',
... name='88', vrid='1',
... vip='2003:4818:f000:1ab:cafe:beef:1000:1/64')
... dev.interface.vrrp_priority(int_type='ve', name='88',
... rbridge_id='225', vrid='1', ip_version='4',
... priority='66')
... dev.interface.vrrp_priority(int_type='ve', name='88',
... rbridge_id='225', vrid='1', ip_version='6',
... priority='77')
... output = dev.services.vrrp(ip_version='6',
... enabled=False, rbridge_id='225')
... output = dev.services.vrrp(enabled=False,
... rbridge_id='225')
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
vrid = kwargs.pop('vrid')
priority = kwargs.pop('priority')
ip_version = int(kwargs.pop('ip_version'))
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 've']
vrrp_args = dict(name=name, vrid=vrid, priority=priority)
vrrp_priority = None
method_name = None
method_class = self._interface
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
if ip_version == 4:
vrrp_args['version'] = '3'
method_name = 'interface_%s_vrrp_priority' % int_type
elif ip_version == 6:
method_name = 'interface_%s_ipv6_vrrpv3_group_priority' % int_type
if int_type == 've':
method_name = "rbridge_id_%s" % method_name
if ip_version == 6:
method_name = method_name.replace('group_', '')
method_class = self._rbridge
vrrp_args['rbridge_id'] = rbridge_id
if not pynos.utilities.valid_vlan_id(name):
raise InvalidVlanId("`name` must be between `1` and `8191`")
elif not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
vrrp_priority = getattr(method_class, method_name)
config = vrrp_priority(**vrrp_args)
return callback(config)
    def vrrp_advertisement_interval(self, **kwargs):
        """Set VRRP advertisement interval.

        NOTE(review): not implemented -- this is a placeholder kept for API
        parity with the other VRRP helpers (`vrrp_vip`, `vrrp_priority`).
        It accepts any keyword arguments and does nothing.
        """
        pass
def proxy_arp(self, **kwargs):
"""Set interface administrative state.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
enabled (bool): Is proxy-arp enabled? (True, False)
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `state` is not passed.
ValueError: if `int_type`, `name`, or `state` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... dev.interface.proxy_arp(int_type='tengigabitethernet',
... name='225/0/12', enabled=True)
... dev.interface.proxy_arp(int_type='tengigabitethernet',
... name='225/0/12', enabled=False)
... output = dev.interface.add_vlan_int('86')
... output = dev.interface.ip_address(int_type='ve',
... name='86', ip_addr='172.16.2.1/24', rbridge_id='225')
... output = dev.interface.proxy_arp(int_type='ve',
... name='86', enabled=True, rbridge_id='225')
... output = dev.interface.proxy_arp(int_type='ve',
... name='86', enabled=False, rbridge_id='225')
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
enabled = kwargs.pop('enabled', True)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 've']
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
if not isinstance(enabled, bool):
raise ValueError('`enabled` must be `True` or `False`.')
method_name = 'interface_%s_ip_ip_config_proxy_arp' % int_type
method_class = self._interface
proxy_arp_args = dict(name=name)
if int_type == 've':
method_name = "rbridge_id_%s" % method_name
method_class = self._rbridge
proxy_arp_args['rbridge_id'] = rbridge_id
if not pynos.utilities.valid_vlan_id(name):
raise InvalidVlanId("`name` must be between `1` and `8191`")
elif not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'phyiscal interfaces or x for port channel.')
proxy_arp = getattr(method_class, method_name)
config = proxy_arp(**proxy_arp_args)
if not enabled:
config.find('.//*proxy-arp').set('operation', 'delete')
try:
return callback(config)
# TODO: Catch existing 'no proxy arp'
# This is in place because if proxy arp is already disabled,
# `ncclient` will raise an error if you try to disable it again.
except AttributeError:
return None
def port_channel_minimum_links(self, **kwargs):
"""Set minimum number of links in a port channel.
Args:
name (str): Port-channel number. (1, 5, etc)
minimum_links (str): Minimum number of links in channel group.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `name` or `minimum_links` is not specified.
ValueError: if `name` is not a valid value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.port_channel_minimum_links(
... name='1', minimum_links='2')
... dev.interface.port_channel_minimum_links()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
name = str(kwargs.pop('name'))
minimum_links = str(kwargs.pop('minimum_links'))
callback = kwargs.pop('callback', self._callback)
min_links_args = dict(name=name, minimum_links=minimum_links)
if not pynos.utilities.valid_interface('port_channel', name):
raise ValueError("`name` must match `^[0-9]{1,3}${1,3}$`")
config = getattr(
self._interface,
'interface_port_channel_minimum_links'
)(**min_links_args)
return callback(config)
def channel_group(self, **kwargs):
"""set channel group mode.
args:
int_type (str): type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): name of interface. (1/0/5, 1/0/10, etc)
port_int (str): port-channel number (1, 2, 3, etc).
channel_type (str): tiype of port-channel (standard, brocade)
mode (str): mode of channel group (active, on, passive).
delete (bool): Removes channel group configuration from this
interface if `delete` is ``True``.
callback (function): a function executed upon completion of the
method. the only parameter passed to `callback` will be the
``elementtree`` `config`.
returns:
return value of `callback`.
raises:
keyerror: if `int_type`, `name`, or `description` is not specified.
valueerror: if `name` or `int_type` are not valid values.
examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.channel_group(name='225/0/20',
... int_type='tengigabitethernet',
... port_int='1', channel_type='standard', mode='active')
... dev.interface.channel_group()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
channel_type = kwargs.pop('channel_type')
port_int = kwargs.pop('port_int')
mode = kwargs.pop('mode')
delete = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
int_types = [
'gigabitethernet',
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet'
]
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
valid_modes = ['active', 'on', 'passive']
if mode not in valid_modes:
raise ValueError("`mode` must be one of: %s" % repr(valid_modes))
valid_types = ['brocade', 'standard']
if channel_type not in valid_types:
raise ValueError("`channel_type` must be one of: %s" %
repr(valid_types))
if not pynos.utilities.valid_interface('port_channel', port_int):
raise ValueError("incorrect port_int value.")
channel_group_args = dict(name=name, mode=mode)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("incorrect name value.")
config = getattr(
self._interface,
'interface_%s_channel_group_mode' % int_type
)(**channel_group_args)
channel_group = config.find('.//*channel-group')
if delete is True:
channel_group.set('operation', 'delete')
else:
ET.SubElement(channel_group, 'port-int').text = port_int
ET.SubElement(channel_group, 'type').text = channel_type
return callback(config)
def port_channel_vlag_ignore_split(self, **kwargs):
    """Ignore VLAG Split.

    Args:
        name (str): Port-channel number. (1, 5, etc)
        enabled (bool): Is ignore split enabled? (True, False)
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `name` is not specified.
        ValueError: if `name` is not a valid value.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.port_channel_vlag_ignore_split(
        ...         name='1', enabled=True)
        ...         dev.interface.port_channel_vlag_ignore_split()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    name = str(kwargs.pop('name'))
    enabled = bool(kwargs.pop('enabled', True))
    callback = kwargs.pop('callback', self._callback)
    vlag_ignore_args = dict(name=name)
    if not pynos.utilities.valid_interface('port_channel', name):
        # Port-channel identifiers are 1-3 digit numbers.  The original
        # message was the placeholder "`name` must match x".
        raise ValueError("`name` must match `^[0-9]{1,3}$`")
    config = getattr(
        self._interface,
        'interface_port_channel_vlag_ignore_split'
    )(**vlag_ignore_args)
    if not enabled:
        # Disabling means deleting the ignore-split node from the config.
        ignore_split = config.find('.//*ignore-split')
        ignore_split.set('operation', 'delete')
    return callback(config)
def trunk_mode(self, **kwargs):
"""Set trunk mode (trunk, trunk-no-default-vlan).
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
mode (str): Trunk port mode (trunk, trunk-no-default-native).
callback (function): A function executed upon completion oj the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `mode` is not specified.
ValueError: if `int_type`, `name`, or `mode` is not valid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.trunk_mode(name='225/0/38',
... int_type='tengigabitethernet', mode='trunk')
... dev.interface.trunk_mode()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
mode = kwargs.pop('mode').lower()
get = kwargs.pop('get', False)
callback = kwargs.pop('callback', self._callback)
int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel']
if int_type not in int_types:
raise ValueError("Incorrect int_type value.")
valid_modes = ['trunk', 'trunk-no-default-native']
if mode not in valid_modes:
raise ValueError("Incorrect mode value")
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
mode_args = dict(name=name, vlan_mode=mode)
switchport_mode = getattr(self._interface, 'interface_%s_switchport_'
'mode_vlan_mode' % int_type)
config = switchport_mode(**mode_args)
if get:
return callback(config, handler='get_config')
config = switchport_mode(**mode_args)
return callback(config)
def transport_service(self, **kwargs):
    """Configure VLAN Transport Service.

    Args:
        vlan (str): The VLAN ID.
        service_id (str): The transport-service ID.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `vlan` or `service_id` is not specified.
        ValueError: if `vlan` is invalid.
    """
    vlan = kwargs.pop('vlan')
    service_id = kwargs.pop('service_id')
    callback = kwargs.pop('callback', self._callback)
    # Transport service is allowed on the extended VLAN range (up to 8191).
    if not pynos.utilities.valid_vlan_id(vlan, extended=True):
        raise InvalidVlanId("vlan must be between `1` and `8191`")
    builder = getattr(self._interface,
                      'interface_vlan_interface_vlan_'
                      'transport_service')
    config = builder(name=vlan, transport_service=service_id)
    return callback(config)
def lacp_timeout(self, **kwargs):
"""Set lacp timeout.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
timeout (str): Timeout length. (short, long)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `timeout` is not specified.
ValueError: if `int_type`, `name`, or `timeout is not valid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> int_type = 'tengigabitethernet'
>>> name = '225/0/39'
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.channel_group(name=name,
... int_type=int_type, port_int='1',
... channel_type='standard', mode='active')
... output = dev.interface.lacp_timeout(name=name,
... int_type=int_type, timeout='long')
... dev.interface.lacp_timeout()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
timeout = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
int_types = [
'gigabitethernet',
'tengigabitethernet',
'fortygigabitethernet',
'hundredgigabitethernet'
]
if int_type not in int_types:
raise ValueError("Incorrect int_type value.")
valid_timeouts = ['long', 'short']
if timeout not in valid_timeouts:
raise ValueError("Incorrect timeout value")
timeout_args = dict(name=name, timeout=timeout)
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError("Incorrect name value.")
config = getattr(
self._interface,
'interface_%s_lacp_timeout' % int_type
)(**timeout_args)
return callback(config)
def switchport(self, **kwargs):
"""Set interface switchport status.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
enabled (bool): Is the interface enabled? (True, False)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type` or `name` is not specified.
ValueError: if `name` or `int_type` is not a valid
value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.switchport(name='225/0/19',
... int_type='tengigabitethernet')
... output = dev.interface.switchport(name='225/0/19',
... int_type='tengigabitethernet', enabled=False)
... dev.interface.switchport()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
enabled = kwargs.pop('enabled', True)
callback = kwargs.pop('callback', self._callback)
int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 'vlan']
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
switchport_args = dict(name=name)
switchport = getattr(self._interface,
'interface_%s_switchport_basic_basic' % int_type)
config = switchport(**switchport_args)
if not enabled:
config.find('.//*switchport-basic').set('operation', 'delete')
if kwargs.pop('get', False):
return callback(config, handler='get_config')
else:
return callback(config)
def acc_vlan(self, **kwargs):
"""Set access VLAN on a port.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
vlan (str): VLAN ID to set as the access VLAN.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `vlan` is not specified.
ValueError: if `int_type`, `name`, or `vlan` is not valid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> int_type = 'tengigabitethernet'
>>> name = '225/0/30'
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.add_vlan_int('736')
... output = dev.interface.enable_switchport(int_type,
... name)
... output = dev.interface.acc_vlan(int_type=int_type,
... name=name, vlan='736')
... dev.interface.acc_vlan()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
int_type = kwargs.pop('int_type')
name = kwargs.pop('name')
vlan = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel']
if int_type not in int_types:
raise ValueError("`int_type` must be one of: %s" % repr(int_types))
if not pynos.utilities.valid_vlan_id(vlan):
raise InvalidVlanId("`name` must be between `1` and `4096`")
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
vlan_args = dict(name=name, accessvlan=vlan)
access_vlan = getattr(self._interface,
'interface_%s_switchport_access_accessvlan' %
int_type)
config = access_vlan(**vlan_args)
return callback(config)
@property
def interfaces(self):
    """list[dict]: A list of dictionary items describing the operational
    state of interfaces.
    This method currently only lists the Physical Interfaces (
    Gigabitethernet, tengigabitethernet, fortygigabitethernet,
    hundredgigabitethernet) and Loopback interfaces. It currently
    excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre
    Channel ports.
    """
    # Clark-notation prefix for matching namespaced tags in the reply.
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    int_ns = 'urn:brocade.com:mgmt:brocade-interface-ext'
    result = []
    # Pagination state: the device echoes a 'has-more' flag; the last
    # interface seen is sent back to request the next page.
    has_more = ''
    last_interface_name = ''
    last_interface_type = ''
    while (has_more == '') or (has_more == 'true'):
        request_interface = self.get_interface_detail_request(
            last_interface_name, last_interface_type)
        interface_result = self._callback(request_interface, 'get')
        has_more = interface_result.find('%shas-more' % urn).text
        for item in interface_result.findall('%sinterface' % urn):
            interface_type = item.find('%sinterface-type' % urn).text
            interface_name = item.find('%sinterface-name' % urn).text
            # Track the last interface seen for the next page request.
            last_interface_type = interface_type
            last_interface_name = interface_name
            # Only *gigabitethernet physical interfaces are collected here.
            if "gigabitethernet" in interface_type:
                interface_role = item.find('%sport-role' % urn).text
                if_name = item.find('%sif-name' % urn).text
                interface_state = item.find('%sif-state' % urn).text
                interface_proto_state = item.find('%sline-protocol-state' %
                                                  urn).text
                interface_mac = item.find(
                    '%scurrent-hardware-address' % urn).text
                item_results = {'interface-type': interface_type,
                                'interface-name': interface_name,
                                'interface-role': interface_role,
                                'if-name': if_name,
                                'interface-state': interface_state,
                                'interface-proto-state':
                                interface_proto_state,
                                'interface-mac': interface_mac}
                result.append(item_results)
    # Loopback interfaces. Probably for other non-physical interfaces, too.
    ip_result = []
    request_interface = ET.Element('get-ip-interface', xmlns=int_ns)
    interface_result = self._callback(request_interface, 'get')
    for interface in interface_result.findall('%sinterface' % urn):
        int_type = interface.find('%sinterface-type' % urn).text
        int_name = interface.find('%sinterface-name' % urn).text
        if int_type == 'unknown':
            continue
        int_state = interface.find('%sif-state' % urn).text
        int_proto_state = interface.find('%sline-protocol-state' %
                                         urn).text
        ip_address = interface.find('.//%sipv4' % urn).text
        results = {'interface-type': int_type,
                   'interface-name': int_name,
                   'interface-role': None,
                   'if-name': None,
                   'interface-state': int_state,
                   'interface-proto-state': int_proto_state,
                   'interface-mac': None,
                   'ip-address': ip_address}
        # Merge in any physical-interface details gathered in the first
        # pass for the same (type, name) pair.
        x = next((x for x in result if int_type == x['interface-type'] and
                  int_name == x['interface-name']), None)
        if x is not None:
            results.update(x)
        ip_result.append(results)
    return ip_result
@staticmethod
def get_interface_detail_request(last_interface_name,
last_interface_type):
""" Creates a new Netconf request based on the last received
interface name and type when the hasMore flag is true
"""
request_interface = ET.Element(
'get-interface-detail',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
if last_interface_name != '':
last_received_int = ET.SubElement(request_interface,
"last-rcvd-interface")
last_int_type_el = ET.SubElement(last_received_int,
"interface-type")
last_int_type_el.text = last_interface_type
last_int_name_el = ET.SubElement(last_received_int,
"interface-name")
last_int_name_el.text = last_interface_name
return request_interface
@property
def interface_detail(self):
    """list[dict]: A list of dictionary items describing the
    interface type, name, role, mac, admin and operational
    state of interfaces of all rbridges.
    This method currently only lists the Physical Interfaces (
    Gigabitethernet, tengigabitethernet, fortygigabitethernet,
    hundredgigabitethernet) and port-channel
    """
    # Clark-notation prefix for matching namespaced tags in the reply.
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    result = []
    # Pagination state driven by the device's 'has-more' flag.
    has_more = ''
    last_interface_name = ''
    last_interface_type = ''
    while (has_more == '') or (has_more == 'true'):
        request_interface = self.get_interface_detail_request(
            last_interface_name, last_interface_type)
        interface_result = self._callback(request_interface, 'get')
        has_more = interface_result.find('%shas-more' % urn).text
        for item in interface_result.findall('%sinterface' % urn):
            interface_type = item.find('%sinterface-type' % urn).text
            interface_name = item.find('%sinterface-name' % urn).text
            # Track the last interface seen for the next page request.
            last_interface_type = interface_type
            last_interface_name = interface_name
            # Collect physical ethernet interfaces and port-channels only.
            if "gigabitethernet" in interface_type or\
                    "port-channel" in interface_type:
                if "gigabitethernet" in interface_type:
                    interface_role = item.find('%sport-role' % urn).text
                else:
                    # Port-channels carry no port-role element.
                    interface_role = "None"
                if_name = item.find('%sif-name' % urn).text
                interface_state = item.find('%sif-state' % urn).text
                interface_proto_state = item.find('%sline-protocol-state' %
                                                  urn).text
                interface_mac = item.find(
                    '%scurrent-hardware-address' % urn).text
                item_results = {'interface-type': interface_type,
                                'interface-name': interface_name,
                                'interface-role': interface_role,
                                'if-name': if_name,
                                'interface-state': interface_state,
                                'interface-proto-state':
                                interface_proto_state,
                                'interface-mac': interface_mac}
                result.append(item_results)
    return result
@property
def switchport_list(self):
    """list[dict]: Details of every switchport reported by the device.

    Each entry carries the interface type/name, the switchport mode,
    and the list of active VLAN IDs on that port.
    """
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    response = self._callback(self.get_interface_switchport_request(),
                              'get')
    details = []
    for port in response.findall('%sswitchport' % urn):
        active = port.find('%sactive-vlans' % urn)
        vlan_ids = [node.text
                    for node in active.findall('%svlanid' % urn)]
        details.append({'vlan-id': vlan_ids,
                        'mode': self.get_node_value(port, '%smode', urn),
                        'interface-name': self.get_node_value(
                            port, '%sinterface-name', urn),
                        'interface_type': self.get_node_value(
                            port, '%sinterface-type', urn)})
    return details
@property
def vlans(self):
    """list[dict]: A list of dictionary items describing the details of
    vlan interfaces.
    This method fetches the VLAN interfaces
    Examples:
        >>> import pynos.device
        >>> switch = '10.24.39.202'
        >>> auth = ('admin', 'password')
        >>> conn = (switch, '22')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.interface.add_vlan_int('736')
        ...     interfaces = dev.interface.vlans
        ...     is_vlan_interface_present = False
        ...     for interface in interfaces:
        ...         if interface['vlan-id'] == '736':
        ...             is_vlan_interface_present = True
        ...             break
        ...     dev.interface.del_vlan_int('736')
        ...     assert is_vlan_interface_present
        True
    """
    # Clark-notation prefix for matching namespaced tags in the reply.
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    result = []
    # Pagination: 'has-more' and 'last-vlan-id' come back in each reply
    # and drive the next get-vlan-brief request.
    has_more = ''
    last_vlan_id = ''
    while (has_more == '') or (has_more == 'true'):
        request_interface = self.get_vlan_brief_request(last_vlan_id)
        interface_result = self._callback(request_interface, 'get')
        has_more = self.get_node_value(interface_result, '%shas-more', urn)
        last_vlan_id = self.get_node_value(
            interface_result, '%slast-vlan-id', urn)
        for interface in interface_result.findall('%svlan' % urn):
            vlan_id = self.get_node_value(interface, '%svlan-id', urn)
            vlan_type = self.get_node_value(interface, '%svlan-type', urn)
            vlan_name = self.get_node_value(interface, '%svlan-name', urn)
            vlan_state = self.get_node_value(
                interface, '%svlan-state', urn)
            # Member ports of this VLAN.
            ports = []
            for intf in interface.findall('%sinterface' % urn):
                interface_type = self.get_node_value(
                    intf, '%sinterface-type', urn)
                interface_name = self.get_node_value(
                    intf, '%sinterface-name', urn)
                tag = self.get_node_value(intf, '%stag', urn)
                port_results = {'interface-type': interface_type,
                                'interface-name': interface_name,
                                'tag': tag}
                ports.append(port_results)
            results = {'interface-name': vlan_name,
                       'vlan-state': vlan_state,
                       'vlan-id': vlan_id,
                       'vlan-type': vlan_type,
                       'interface': ports}
            result.append(results)
    return result
@staticmethod
def get_interface_switchport_request():
"""Creates a new Netconf request"""
request_interface = ET.Element(
'get-interface-switchport',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
return request_interface
@staticmethod
def get_vlan_brief_request(last_vlan_id):
""" Creates a new Netconf request based on the last received
vlan id when the hasMore flag is true
"""
request_interface = ET.Element(
'get-vlan-brief',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
if last_vlan_id != '':
last_received_int_el = ET.SubElement(request_interface,
"last-rcvd-vlan-id")
last_received_int_el.text = last_vlan_id
return request_interface
@property
def port_channels(self):
    """list[dict]: A list of dictionary items of port channels.
    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.202']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.channel_group(name='226/0/1',
        ...         int_type='tengigabitethernet',
        ...         port_int='1', channel_type='standard', mode='active')
        ...         result = dev.interface.port_channels
        ...         is_port_channel_exist = False
        ...         for port_chann in result:
        ...             if port_chann['interface-name']=='port-channel-1':
        ...                 for interfaces in port_chann['interfaces']:
        ...                     for keys, values in interfaces.items():
        ...                         if '226/0/1' in values:
        ...                             is_port_channel_exist = True
        ...                             break
        ...         output = dev.interface.remove_port_channel(
        ...         port_int='1')
        ...         assert is_port_channel_exist
    """
    # Clark-notation prefix for the brocade-lag namespace.
    pc_urn = "{urn:brocade.com:mgmt:brocade-lag}"
    result = []
    # Pagination: when 'has-more' is 'true', the last aggregator-id seen
    # in this page is echoed back to fetch the next page.
    has_more = ''
    last_aggregator_id = ''
    while (has_more == '') or (has_more == 'true'):
        request_port_channel = self.get_port_chann_detail_request(
            last_aggregator_id)
        port_channel_result = self._callback(request_port_channel, 'get')
        has_more = self.get_node_value(port_channel_result,
                                       '%shas-more', pc_urn)
        if has_more == 'true':
            # Scan all lacp entries so last_aggregator_id ends up as the
            # aggregator-id of the final entry in this page.
            for x in port_channel_result.findall('%slacp' % pc_urn):
                last_aggregator_id = self.get_node_value(x,
                                                         '%saggregator-id',
                                                         pc_urn)
        for item in port_channel_result.findall('%slacp' % pc_urn):
            interface_list = []
            # LACP/aggregator attributes of this port-channel.
            aggregator_id = self.get_node_value(
                item, '%saggregator-id', pc_urn)
            aggregator_type = self.get_node_value(
                item, '%saggregator-type', pc_urn)
            is_vlag = self.get_node_value(item, '%sisvlag', pc_urn)
            aggregator_mode = self.get_node_value(
                item, '%saggregator-mode', pc_urn)
            system_priority = self.get_node_value(
                item, '%ssystem-priority', pc_urn)
            actor_system_id = self.get_node_value(
                item, '%sactor-system-id', pc_urn)
            partner_oper_priority = self.get_node_value(
                item, '%spartner-oper-priority', pc_urn)
            partner_system_id = self.get_node_value(
                item, '%spartner-system-id', pc_urn)
            admin_key = self.get_node_value(
                item, '%sadmin-key', pc_urn)
            oper_key = self.get_node_value(item, '%soper-key', pc_urn)
            partner_oper_key = self.get_node_value(
                item, '%spartner-oper-key', pc_urn)
            rx_link_count = self.get_node_value(
                item, '%srx-link-count', pc_urn)
            tx_link_count = self.get_node_value(
                item, '%stx-link-count', pc_urn)
            individual_agg = self.get_node_value(
                item, '%sindividual-agg', pc_urn)
            ready_agg = self.get_node_value(
                item, '%sready-agg', pc_urn)
            # Member interfaces of this aggregate.
            for item1 in item.findall('%saggr-member' % pc_urn):
                rbridge_id = self.get_node_value(
                    item1, '%srbridge-id', pc_urn)
                int_type = self.get_node_value(
                    item1, '%sinterface-type', pc_urn)
                int_name = self.get_node_value(
                    item1, '%sinterface-name', pc_urn)
                actor_port = self.get_node_value(
                    item1, '%sactor-port', pc_urn)
                sync = self.get_node_value(item1, '%ssync', pc_urn)
                port_channel_interface = {'rbridge-id': rbridge_id,
                                          'interface-type': int_type,
                                          'interface-name': int_name,
                                          'actor_port': actor_port,
                                          'sync': sync}
                interface_list.append(port_channel_interface)
            results = {'interface-name': 'port-channel-' + aggregator_id,
                       'interfaces': interface_list,
                       'aggregator_id': aggregator_id,
                       'aggregator_type': aggregator_type,
                       'is_vlag': is_vlag,
                       'aggregator_mode': aggregator_mode,
                       'system_priority': system_priority,
                       'actor_system_id': actor_system_id,
                       'partner-oper-priority': partner_oper_priority,
                       'partner-system-id': partner_system_id,
                       'admin-key': admin_key,
                       'oper-key': oper_key,
                       'partner-oper-key': partner_oper_key,
                       'rx-link-count': rx_link_count,
                       'tx-link-count': tx_link_count,
                       'individual-agg': individual_agg,
                       'ready-agg': ready_agg}
            result.append(results)
    return result
@staticmethod
def get_node_value(node, node_name, urn):
value = node.find(node_name % urn)
if value is not None:
return value.text
else:
return ''
@staticmethod
def get_port_chann_detail_request(last_aggregator_id):
""" Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true
"""
port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag'
request_port_channel = ET.Element('get-port-channel-detail',
xmlns=port_channel_ns)
if last_aggregator_id != '':
last_received_port_chann_el = ET.SubElement(request_port_channel,
"last-aggregator-id")
last_received_port_chann_el.text = last_aggregator_id
return request_port_channel
def bfd(self, **kwargs):
    """Configure BFD on an interface.

    Not implemented for this platform.

    Raises:
        NotImplementedError: always; BFD configuration is not supported
            by this interface driver.
    """
    raise NotImplementedError
def vrrpe_spf_basic(self, **kwargs):
    """Set VRRP-E short path forwarding to default.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        enable (bool): If VRRP-E short path forwarding should be enabled
            or disabled. Default: ``True``.
        get (bool): Get config instead of editing config. (True, False)
        vrid (str): VRRP-E router ID.
        rbridge_id (str): rbridge-id for device. Only required when type
            is `ve`.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `vrid` is not passed.
        ValueError: if `int_type`, `name`, or `vrid` is invalid.
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    vrid = kwargs.pop('vrid')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    allowed = ['gigabitethernet', 'tengigabitethernet',
               'fortygigabitethernet', 'hundredgigabitethernet',
               'port_channel', 've']
    spf_args = dict(name=name, vrid=vrid)
    target = self._interface
    if get:
        # A get never edits, so the enable/disable flag is neutralized.
        enable = None
    if int_type not in allowed:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(allowed))
    method = 'interface_%s_vrrpe_short_path_forwarding_basic' % int_type
    if int_type == 've':
        # VE interfaces are configured under the rbridge subtree.
        method = 'rbridge_id_%s' % method
        target = self._rbridge
        spf_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    config = getattr(target, method)(**spf_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        config.find('.//*short-path-forwarding').set('operation', 'delete')
    return callback(config)
def vrrpe_vip(self, **kwargs):
    """Set vrrpe VIP.
    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, ve, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, VE name etc).
        vrid (str): vrrpev3 ID.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, the VIP address is added and False if its to
            be deleted (True, False). Default value will be False if not
            specified.
        vip (str): IPv4/IPv6 Virtual IP Address.
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.
    Raises:
        KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed.
        ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid.
    Returns:
        Return value of `callback`.
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name',)
    vip = kwargs.pop('vip', '')
    get = kwargs.pop('get', False)
    delete = kwargs.pop('delete', False)
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    # Determine the IP version from the supplied VIP; an empty VIP
    # (get-style calls) defaults to IPv4.
    # NOTE(review): `unicode` is Python 2 only — this module appears to be
    # Python 2 era code; confirm before running under Python 3.
    if vip != '':
        ipaddress = ip_interface(unicode(vip))
        version = ipaddress.version
    else:
        version = 4
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    # Build the builder-method arguments per operation: delete carries the
    # VIP verbatim, get uses empty placeholders, set normalizes the VIP
    # through ip_interface.
    if delete:
        vrid = kwargs.pop('vrid')
        rbridge_id = kwargs.pop('rbridge_id', '1')
        vrrpe_args = dict(rbridge_id=rbridge_id, name=name,
                          vrid=vrid, virtual_ipaddr=vip)
    elif get:
        rbridge_id = kwargs.pop('rbridge_id', '1')
        vrrpe_args = dict(name=name, vrid='', virtual_ipaddr='')
    else:
        vrid = kwargs.pop('vrid')
        ipaddress = ip_interface(unicode(vip))
        if int_type == 've':
            rbridge_id = kwargs.pop('rbridge_id', '1')
        vrrpe_args = dict(name=name, vrid=vrid,
                          virtual_ipaddr=str(ipaddress.ip))
    method_name = None
    method_class = self._interface
    # IPv4 uses the vrrpe subtree (with version '3'); IPv6 uses vrrpv3e.
    if version == 4:
        vrrpe_args['version'] = '3'
        method_name = 'interface_%s_vrrpe_virtual_ip_virtual_' \
                      'ipaddr' % int_type
    elif version == 6:
        method_name = 'interface_%s_ipv6_vrrpv3e_group_virtual_ip_' \
                      'virtual_ipaddr' % int_type
    if int_type == 've':
        # VE interfaces are configured under the rbridge subtree; the
        # IPv6 VE method name omits the 'group_' segment.
        method_name = 'rbridge_id_%s' % method_name
        if version == 6:
            method_name = method_name.replace('group_', '')
        method_class = self._rbridge
        vrrpe_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    vrrpe_vip = getattr(method_class, method_name)
    config = vrrpe_vip(**vrrpe_args)
    result = []
    if delete:
        config.find('.//*virtual-ip').set('operation', 'delete')
    if get:
        # For gets, parse the reply into a list of {vrid, vip} dicts.
        output = callback(config, handler='get_config')
        for item in output.data.findall('.//{*}vrrpe'):
            vrid = item.find('.//{*}vrid').text
            if item.find('.//{*}virtual-ipaddr') is not None:
                vip = item.find('.//{*}virtual-ipaddr').text
            else:
                vip = ''
            tmp = {"vrid": vrid,
                   "vip": vip}
            result.append(tmp)
    else:
        result = callback(config)
    return result
def vrrpe_vmac(self, **kwargs):
    """Set vrrpe virtual mac.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        vrid (str): vrrpev3 ID.
        enable (bool): If vrrpe virtual MAC should be enabled
            or disabled. Default: ``True``.
        get (bool): Get config instead of editing config. (True, False)
        virtual_mac (str): Virtual mac-address in the format
            02e0.5200.00xx.
        rbridge_id (str): rbridge-id for device. Only required
            when type is 've'.
        callback (function): A function executed upon completion
            of the method. The only parameter passed to `callback`
            will be the ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `vrid` is not passed.
        ValueError: if `int_type` or `name` is invalid.

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.interface.vrrpe_vmac(int_type='ve',
        ...     name='89', vrid='1', rbridge_id='1',
        ...     virtual_mac='aaaa.bbbb.cccc')
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    vrid = kwargs.pop('vrid')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    virtual_mac = kwargs.pop('virtual_mac', '02e0.5200.00xx')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    if get:
        # A get must not carry an edit intent.
        enable = None
    vrrpe_args = dict(name=name,
                      vrid=vrid,
                      virtual_mac=virtual_mac)
    method_class = self._interface
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    method_name = 'interface_%s_vrrpe_virtual_mac' % int_type
    if int_type == 've':
        # Ve interfaces are configured under the rbridge subtree.
        method_name = 'rbridge_id_%s' % method_name
        method_class = self._rbridge
        vrrpe_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    vrrpe_vmac = getattr(method_class, method_name)
    config = vrrpe_vmac(**vrrpe_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        # Disable by marking the virtual-mac element for deletion.
        config.find('.//*virtual-mac').set('operation', 'delete')
    return callback(config)
def ve_interfaces(self, **kwargs):
    """list[dict]: A list of dictionary items describing the operational
    state of ve interfaces along with the ip address associations.

    Args:
        rbridge_id (str): rbridge-id for device. Optional; when omitted
            the request is not scoped to a single rbridge.

    Returns:
        list[dict]: One dict per interface with keys `interface-type`,
        `interface-name`, `if-name`, `interface-state`,
        `interface-proto-state`, and `ip-address` (``None`` when the
        interface has no ipv4 association).

    Raises:
        None

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.interface.ve_interfaces()
        ...     output = dev.interface.ve_interfaces(rbridge_id='1')
    """
    # NOTE(review): the request always goes through self._callback; a
    # `callback` kwarg is intentionally not supported here.
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    rbridge_id = kwargs.pop('rbridge_id', None)
    ip_result = []
    request_interface = self._get_intf_rb_id(rbridge_id=rbridge_id)
    interface_result = self._callback(request_interface, 'get')
    for interface in interface_result.findall('%sinterface' % urn):
        int_type = interface.find('%sinterface-type' % urn).text
        int_name = interface.find('%sinterface-name' % urn).text
        int_state = interface.find('%sif-state' % urn).text
        int_proto_state = interface.find('%sline-protocol-state' %
                                         urn).text
        # Bug fix: not every interface carries an <ipv4> element;
        # previously a missing element raised AttributeError.
        ipv4_el = interface.find('.//%sipv4' % urn)
        ip_address = ipv4_el.text if ipv4_el is not None else None
        if_name = interface.find('%sif-name' % urn).text
        results = {'interface-type': int_type,
                   'interface-name': int_name,
                   'if-name': if_name,
                   'interface-state': int_state,
                   'interface-proto-state': int_proto_state,
                   'ip-address': ip_address}
        ip_result.append(results)
    return ip_result
@staticmethod
def _get_intf_rb_id(rbridge_id):
    """Build a ``get-ip-interface`` Netconf request element.

    When `rbridge_id` is not ``None``, an ``rbridge-id`` child is added
    so the request is scoped to that rbridge.
    """
    request = ET.Element(
        'get-ip-interface',
        xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
    )
    if rbridge_id is not None:
        ET.SubElement(request, "rbridge-id").text = rbridge_id
    return request
def conversational_mac(self, **kwargs):
    """Enable conversational mac learning on vdx switches.

    Args:
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete the mac-learning. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`; for `get`, ``True`` when
        conversational learning is configured, else ``None``.

    Raises:
        None

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.interface.conversational_mac()
        ...     output = dev.interface.conversational_mac(get=True)
        ...     output = dev.interface.conversational_mac(delete=True)
    """
    callback = kwargs.pop('callback', self._callback)
    mac_learning = getattr(self._mac_address_table,
                           'mac_address_table_learning_mode')
    config = mac_learning(learning_mode='conversational')
    if kwargs.pop('get', False):
        output = callback(config, handler='get_config')
        item = output.data.find('.//{*}learning-mode')
        # Bug fix: a get that found nothing previously fell through and
        # pushed an edit-config; it now returns None instead.
        return True if item is not None else None
    if kwargs.pop('delete', False):
        config.find('.//*learning-mode').set('operation', 'delete')
    return callback(config)
def add_int_vrf(self, **kwargs):
    """
    Add L3 Interface in Vrf.

    Args:
        int_type (str): L3 interface type on which the vrf needs to be
            configured. Required, including when `get` is True.
        name (str): L3 interface name on which the vrf needs to be
            configured.
        vrf_name (str): Vrf name with which the L3 interface needs to
            be associated. Default: ``'Default'``.
        enable (bool): If vrf forwarding should be enabled
            or disabled. Default: ``True``.
        get (bool): Get config instead of editing config. (True, False)
        rbridge_id (str): rbridge-id for device. Only required when
            type is `ve`.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type` or `name` is not passed.
        ValueError: if `int_type` or `name` is invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.add_int_vrf(
        ...         int_type='tengigabitethernet',
        ...         name='225/0/38',
        ...         vrf_name='100',
        ...         rbridge_id='1')
        ...         output = dev.interface.add_int_vrf(
        ...         get=True, int_type='tengigabitethernet',
        ...         name='225/0/38',
        ...         rbridge_id='1')
        ...         output = dev.interface.add_int_vrf(
        ...         enable=False, int_type='tengigabitethernet',
        ...         name='225/0/39',
        ...         vrf_name='101',
        ...         rbridge_id='1')
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    # NOTE(review): `int_type` is popped unconditionally, so it is
    # required even for get=True (the earlier docstring claimed
    # otherwise).
    name = kwargs.pop('name')
    vrf_name = kwargs.pop('vrf_name', 'Default')
    int_type = kwargs.pop('int_type').lower()
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    if get:
        enable = None
    vrf_args = dict(name=name, forwarding=vrf_name)
    method_class = self._interface
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    method_name = 'interface_%s_vrf_forwarding' % \
                  int_type
    if int_type == 've':
        # Ve interfaces are configured under the rbridge subtree.
        method_name = 'rbridge_id_%s' % method_name
        method_class = self._rbridge
        vrf_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    add_int_vrf = getattr(method_class, method_name)
    config = add_int_vrf(**vrf_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        config.find('.//*forwarding').set('operation', 'delete')
    return callback(config)
def int_ipv4_arp_aging_timout(self, **kwargs):
    """Configure ``ip arp aging-time-out`` on an L3 interface.

    Args:
        int_type (str): L3 interface type on which the ageout time
            needs to be configured.
        name (str): L3 interface name on which the ageout time needs
            to be configured.
        arp_aging_timeout (str): Arp age out time in <0..240>.
        enable (bool): ``True`` (default) to set the timeout,
            ``False`` to remove it.
        get (bool): Get config instead of editing config. (True, False)
        rbridge_id (str): rbridge-id for device. Only required when
            type is 've'.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type` or `name` is not passed.
        ValueError: if `int_type`, `name`, or `arp_aging_timeout`
            is invalid.
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    arp_aging_timeout = kwargs.pop('arp_aging_timeout', '')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    ageout_args = dict(name=name, arp_aging_timeout=arp_aging_timeout)
    method_class = self._interface
    if get:
        enable = None
    elif not 0 <= int(arp_aging_timeout) <= 240:
        # Range-check only applies when editing config.
        raise ValueError('arp_aging_timeout must be within 0-240')
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    method_name = ('interface_%s_ip_ip_config_arp_aging_timeout'
                   % int_type)
    if int_type == 've':
        # Ve interfaces live under the rbridge subtree.
        method_name = 'rbridge_id_%s' % method_name
        method_class = self._rbridge
        ageout_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    builder = getattr(method_class, method_name)
    config = builder(**ageout_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        config.find('.//*arp-aging-timeout').set('operation', 'delete')
    return callback(config)
def overlay_gateway_name(self, **kwargs):
    """Configure the name of an Overlay Gateway on vdx switches.

    Args:
        gw_name (str): Name of Overlay Gateway. Required unless `get`.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete the overlay gateway config.
            (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, the configured gateway name or ``None``; otherwise
        the return value of `callback`.

    Raises:
        KeyError: if `gw_name` is not passed.
        ValueError: if `gw_name` is invalid.
    """
    callback = kwargs.pop('callback', self._callback)
    builder = getattr(self._tunnels, 'overlay_gateway_name')
    if kwargs.pop('get', False):
        config = builder(name='')
        output = callback(config, handler='get_config')
        name_el = output.data.find('.//{*}name')
        return name_el.text if name_el is not None else None
    config = builder(name=kwargs.pop('gw_name'))
    if kwargs.pop('delete', False):
        config.find('.//overlay-gateway').set('operation', 'delete')
    return callback(config)
def overlay_gateway_activate(self, **kwargs):
    """Activate the Overlay Gateway instance on VDX switches.

    Args:
        gw_name (str): Name of Overlay Gateway. Required unless `get`.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete the activate config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, ``True`` when a gateway is activated, else ``None``;
        otherwise the return value of `callback`.

    Raises:
        KeyError: if `gw_name` is not passed.
        ValueError: if `gw_name` is invalid.
    """
    callback = kwargs.pop('callback', self._callback)
    builder = getattr(self._tunnels, 'overlay_gateway_activate')
    if kwargs.pop('get', False):
        config = builder(name='')
        output = callback(config, handler='get_config')
        # Both a named gateway and its activate flag must be present.
        if (output.data.find('.//{*}name') is not None and
                output.data.find('.//{*}activate') is not None):
            return True
        return None
    config = builder(name=kwargs.pop('gw_name'))
    if kwargs.pop('delete', False):
        config.find('.//activate').set('operation', 'delete')
    return callback(config)
def overlay_gateway_type(self, **kwargs):
    """Configure Overlay Gateway Type on vdx switches.

    Args:
        gw_name (str): Name of Overlay Gateway.
        gw_type (str): Type of Overlay Gateway (hardware-vtep/
            layer2-extension/nsx).
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, the configured gateway type or ``None``; otherwise
        the return value of `callback`.

    Raises:
        KeyError: if `gw_name` or `gw_type` is not passed.
        ValueError: if `gw_name` or `gw_type` is invalid.

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.interface.overlay_gateway_type(gw_name='Leaf',
        ...     gw_type='layer2-extension')
        ...     output = dev.interface.overlay_gateway_name(get=True)
    """
    callback = kwargs.pop('callback', self._callback)
    get_config = kwargs.pop('get', False)
    if not get_config:
        gw_name = kwargs.pop('gw_name')
        gw_type = kwargs.pop('gw_type')
        gw_args = dict(name=gw_name, gw_type=gw_type)
        overlay_gw = getattr(self._tunnels, 'overlay_gateway_gw_type')
        config = overlay_gw(**gw_args)
    if get_config:
        overlay_gw = getattr(self._tunnels, 'overlay_gateway_gw_type')
        config = overlay_gw(name='', gw_type='')
        output = callback(config, handler='get_config')
        if output.data.find('.//{*}name') is not None:
            # Bug fix: guard the gw-type lookup so a gateway configured
            # without a type returns None instead of raising
            # AttributeError (matches overlay_gateway_loopback_id).
            gw_type_el = output.data.find('.//{*}gw-type')
            if gw_type_el is not None:
                return gw_type_el.text
        return None
    return callback(config)
def overlay_gateway_loopback_id(self, **kwargs):
    """Configure the Overlay Gateway ip interface loopback.

    Args:
        gw_name (str): Name of Overlay Gateway <WORD:1-32>.
        loopback_id (str): Loopback interface Id <NUMBER: 1-255>.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete the overlay gateway loop back id.
            (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, the configured loopback id or ``None``; otherwise
        the return value of `callback`.

    Raises:
        KeyError: if `gw_name` or `loopback_id` is not passed.
        ValueError: if `gw_name` or `loopback_id` is invalid.
    """
    callback = kwargs.pop('callback', self._callback)
    builder = getattr(self._tunnels, 'overlay_gateway_ip_'
                                     'interface_loopback_loopback_id')
    if kwargs.pop('get', False):
        config = builder(name='', loopback_id='')
        output = callback(config, handler='get_config')
        if output.data.find('.//{*}name') is None:
            return None
        loopback_el = output.data.find('.//{*}loopback-id')
        return loopback_el.text if loopback_el is not None else None
    config = builder(name=kwargs.pop('gw_name'),
                     loopback_id=kwargs.pop('loopback_id'))
    if kwargs.pop('delete', False):
        config.find('.//loopback-id').set('operation', 'delete')
    return callback(config)
def overlay_gateway_vlan_vni_auto(self, **kwargs):
    """Configure automatic Overlay Gateway VLAN-to-VNI mapping on VDX.

    Args:
        gw_name (str): Name of Overlay Gateway. Required unless `get`.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete vlan to vni auto mapping.
            (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, ``True`` when auto mapping is configured, else
        ``None``; otherwise the return value of `callback`.

    Raises:
        KeyError: if `gw_name` is not passed.
        ValueError: if `gw_name` is invalid.
    """
    callback = kwargs.pop('callback', self._callback)
    builder = getattr(self._tunnels, 'overlay_gateway_map_'
                                     'vlan_vni_auto')
    if kwargs.pop('get', False):
        config = builder(name='')
        output = callback(config, handler='get_config')
        # Auto mapping is present only when both the gateway name and
        # its map element come back.
        if (output.data.find('.//{*}name') is not None and
                output.data.find('.//{*}map') is not None):
            return True
        return None
    config = builder(name=kwargs.pop('gw_name'))
    if kwargs.pop('delete', False):
        config.find('.//map').set('operation', 'delete')
    return callback(config)
def overlay_gateway_attach_rbridge_id(self, **kwargs):
    """Configure Overlay Gateway attach rbridge id.

    Args:
        gw_name (str): Name of Overlay Gateway <WORD:1-32>.
        rbridge_id (str): Single or range of rbridge id to be
            added/removed.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, remove the given rbridge list from the
            gateway. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        For `get`, the attached rb-add list or ``None``; otherwise the
        return value of `callback`.

    Raises:
        KeyError: if `gw_name` or `rbridge_id` is not passed.
        ValueError: if `gw_name` or `rbridge_id` is invalid.
    """
    callback = kwargs.pop('callback', self._callback)
    get_config = kwargs.pop('get', False)
    delete = kwargs.pop('delete', False)
    if not get_config:
        gw_name = kwargs.pop('gw_name')
        rbridge_id = kwargs.pop('rbridge_id')
        # Removal and addition use distinct rb_remove/rb_add leafs.
        if delete is True:
            gw_args = dict(name=gw_name, rb_remove=rbridge_id)
            overlay_gw = getattr(self._tunnels, 'overlay_gateway_'
                                                'attach_rbridge_id_rb_remove')
        else:
            gw_args = dict(name=gw_name, rb_add=rbridge_id)
            overlay_gw = getattr(self._tunnels, 'overlay_gateway_'
                                                'attach_rbridge_id_rb_add')
        config = overlay_gw(**gw_args)
    if get_config:
        overlay_gw = getattr(self._tunnels, 'overlay_gateway_'
                                            'attach_rbridge_id_rb_add')
        config = overlay_gw(name='', rb_add='')
        output = callback(config, handler='get_config')
        # Fixed: removed a no-op `output.data.find('.//{*}name').text`
        # expression whose result was discarded.
        if (output.data.find('.//{*}name') is not None and
                output.data.find('.//{*}attach') is not None):
            return output.data.find('.//{*}rb-add').text
        return None
    return callback(config)
def ipv6_link_local(self, **kwargs):
    """Configure an ipv6 link-local address on a vdx interface.

    Args:
        int_type (str): Interface type; only 've' and 'loopback' are
            accepted.
        name (str): 'Ve' or 'loopback' interface name.
        rbridge_id (str): rbridge-id for device.
        get (bool): Get config instead of editing config. (True, False)
        delete (bool): True, delete the link-local config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`; for `get`, ``True`` when
        use-link-local-only is configured.

    Raises:
        KeyError: if `int_type` or `name` is not passed.
        ValueError: if `int_type` or `name` is invalid.
    """
    int_type = kwargs.pop('int_type').lower()
    intf_name = kwargs.pop('name')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    allowed_types = ['loopback', 've']
    if int_type not in allowed_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(allowed_types))
    builder = getattr(
        self._rbridge,
        'rbridge_id_interface_%s_ipv6_ipv6_config_address_'
        'use_link_local_only' % int_type)
    config = builder(name=intf_name, rbridge_id=rbridge_id,
                     int_type=int_type)
    if kwargs.pop('get', False):
        output = callback(config, handler='get_config')
        if output.data.find('.//{*}use-link-local-only') is not None:
            return True
    if kwargs.pop('delete', False):
        config.find('.//*use-link-local-only').set('operation', 'delete')
    return callback(config)
def fabric_neighbor(self, **kwargs):
    """Set fabric neighbor discovery state.

    Args:
        int_type (str): Type of interface. (tengigabitethernet,
            fortygigabitethernet, hundredgigabitethernet)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        enabled (bool): Is fabric neighbor discovery enabled?
            (True, False)
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `enabled` is not specified.
        ValueError: if `int_type`, `name`, or `enabled` is not a valid
            value.
    """
    int_type = str(kwargs.pop('int_type').lower())
    name = str(kwargs.pop('name'))
    enabled = kwargs.pop('enabled', True)
    callback = kwargs.pop('callback', self._callback)
    allowed_types = [
        'tengigabitethernet',
        'fortygigabitethernet',
        'hundredgigabitethernet'
    ]
    if int_type not in allowed_types:
        raise ValueError("`int_type` must be one of: %s" %
                         repr(allowed_types))
    if not isinstance(enabled, bool):
        raise ValueError('`enabled` must be `True` or `False`.')
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError("`name` must match `^[0-9]{1,3}/[0-9]{1,3}/[0-9]"
                         "{1,3}$`")
    builder = getattr(
        self._interface,
        'interface_%s_fabric_neighbor_discovery_disable' % int_type
    )
    config = builder(name=name)
    if not enabled:
        config.find('.//*neighbor-discovery').set('operation', 'delete')
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    return callback(config)
def create_ve(self, **kwargs):
    """
    Create (or fetch/remove) a Ve interface on an rbridge.

    Args:
        ve_name (str): Ve name with which the Ve interface needs to be
            created. Default: ``''``.
        enable (bool): ``True`` (default) to create the interface,
            ``False`` to delete it.
        get (bool): Get config instead of editing config. (True, False)
        rbridge_id (str): rbridge-id for device.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        ValueError: if `ve_name` is invalid.
    """
    ve_name = kwargs.pop('ve_name', '')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    callback = kwargs.pop('callback', self._callback)
    if get:
        # A get must not carry an edit intent.
        enable = None
    builder = getattr(self._rbridge, 'rbridge_id_interface_ve_name')
    config = builder(name=ve_name, rbridge_id=rbridge_id)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        config.find('.//*ve').set('operation', 'delete')
    return callback(config)
def port_profile_port(self, inter_type, inter, enable=True):
    """Activate or remove AMPP port-profile-port mode on an interface.

    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure.
            Ex. 1/0/1.
        enable (bool): ``True`` (default) enables port-profile mode;
            ``False`` disables it.

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    config = ET.Element("config")
    interface = ET.SubElement(
        config, "interface",
        xmlns="urn:brocade.com:mgmt:brocade-interface")
    # Generic interface node: the tag is whatever type was requested.
    intf_node = ET.SubElement(interface, inter_type)
    ET.SubElement(intf_node, "name").text = inter
    attrib = {'xmlns': "urn:brocade.com:mgmt:brocade-port-profile"}
    if not enable:
        # Disable is expressed as operation="delete" on the same node.
        attrib['operation'] = 'delete'
    ET.SubElement(intf_node, "port-profile-port", **attrib)
    try:
        # Fixed: the enable/disable branches previously duplicated an
        # identical callback-and-return sequence.
        self._callback(config)
        return True
    except Exception as e:
        logging.error(e)
        return False
| 43.04379
| 108
| 0.530576
| 19,201
| 179,880
| 4.803864
| 0.033071
| 0.036124
| 0.029857
| 0.007893
| 0.838745
| 0.80453
| 0.764438
| 0.733529
| 0.708952
| 0.680623
| 0
| 0.022842
| 0.35601
| 179,880
| 4,178
| 109
| 43.054093
| 0.773414
| 0.429898
| 0
| 0.638534
| 0
| 0.002221
| 0.187771
| 0.050063
| 0
| 0
| 0
| 0.00383
| 0
| 1
| 0.039423
| false
| 0.002776
| 0.005552
| 0
| 0.107163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
606d5c0ca79f0623754450da821409afa65a7bcf
| 191
|
py
|
Python
|
problems/shortest_word.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/shortest_word.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/shortest_word.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
## Shortest Word
## 7 kyu
## https://www.codewars.com/kata/57cebe1dc6fdc20c57000ac9
def find_short(s):
    """Return the length of the shortest whitespace-separated word in *s*.

    Uses ``min`` directly instead of sorting all lengths and taking the
    first element: O(n) instead of O(n log n), and no temporary list.
    Input is assumed to contain at least one word.
    """
    return min(len(word) for word in s.split())
| 23.875
| 59
| 0.649215
| 27
| 191
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092105
| 0.204188
| 191
| 8
| 60
| 23.875
| 0.717105
| 0.47644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
60747afcf4cf8ea5cfe57874b36343c3bbdf66b1
| 103
|
py
|
Python
|
6 kyu/English beggars.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/English beggars.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/English beggars.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
def beggars(values, n):
    """Share *values* among *n* beggars taking turns in round-robin order.

    Beggar ``k`` collects every n-th element starting at index ``k``;
    the result is the list of each beggar's total.
    """
    # The stride slice values[k::n] selects exactly the elements beggar k
    # receives on successive rounds; an empty slice sums to 0.
    return [sum(values[k::n]) for k in range(n)]
| 51.5
| 79
| 0.650485
| 21
| 103
| 3.190476
| 0.571429
| 0.208955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174757
| 103
| 2
| 79
| 51.5
| 0.788235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
60a7a0c81eab3e2e8fbfe7f9a34ec211d4ed0f28
| 10,059
|
py
|
Python
|
tests/test_classifier_archive.py
|
tsalvia/karton-classifier
|
84c642c666334db3df66b98752784c2044aa3c2d
|
[
"BSD-3-Clause"
] | 5
|
2021-01-04T10:18:36.000Z
|
2021-11-08T10:29:17.000Z
|
tests/test_classifier_archive.py
|
tsalvia/karton-classifier
|
84c642c666334db3df66b98752784c2044aa3c2d
|
[
"BSD-3-Clause"
] | 12
|
2021-02-15T12:18:13.000Z
|
2022-01-11T20:12:41.000Z
|
tests/test_classifier_archive.py
|
tsalvia/karton-classifier
|
84c642c666334db3df66b98752784c2044aa3c2d
|
[
"BSD-3-Clause"
] | 9
|
2021-02-01T14:06:57.000Z
|
2022-02-24T09:44:37.000Z
|
import pytest
from karton.core import Task
from karton.core.test import ConfigMock, KartonBackendMock, KartonTestCase
from .mock_helper import mock_resource, mock_task
@pytest.mark.usefixtures("karton_classifier")
class TestClassifier(KartonTestCase):
    """End-to-end checks that the classifier recognizes archive samples.

    Every test feeds one ``archive.<ext>`` fixture through the karton
    pipeline and asserts the emitted task's headers/payload. The thirteen
    original copy-pasted bodies are factored into one table-driven helper;
    the public test method names are unchanged so test collection and
    reporting stay identical.
    """

    def setUp(self):
        self.config = ConfigMock()
        self.backend = KartonBackendMock()

    def _assert_archive_classified(self, extension, mime, with_name=False):
        """Run the classifier on ``archive.<extension>`` and verify the result.

        extension -- fixture suffix, also the expected "extension" header
                     and ``archive:<extension>`` payload tag
        mime      -- expected "mime" header value
        with_name -- forwarded to mock_resource() to attach a file name
        """
        if with_name:
            resource = mock_resource("archive." + extension, with_name=True)
        else:
            resource = mock_resource("archive." + extension)
        magic = self.magic_from_content(resource.content, mime=False)
        res = self.run_task(mock_task(resource))
        expected = Task(
            headers={
                "type": "sample",
                "stage": "recognized",
                "origin": "karton.classifier",
                "quality": "high",
                "kind": "archive",
                "mime": mime,
                "extension": extension,
            },
            payload={
                "sample": resource,
                "tags": ["archive:" + extension],
                "magic": magic,
            },
        )
        self.assertTasksEqual(res, [expected])

    def test_process_archive_7z(self):
        self._assert_archive_classified("7z", "application/x-7z-compressed")

    def test_process_archive_ace(self):
        self._assert_archive_classified("ace", "application/octet-stream")

    def test_process_archive_bz2(self):
        self._assert_archive_classified("bz2", "application/x-bzip2")

    def test_process_archive_cab(self):
        self._assert_archive_classified("cab", "application/vnd.ms-cab-compressed")

    def test_process_archive_cab_with_extension(self):
        self._assert_archive_classified(
            "cab", "application/vnd.ms-cab-compressed", with_name=True
        )

    def test_process_archive_gz(self):
        self._assert_archive_classified("gz", "application/gzip")

    def test_process_archive_iso(self):
        self._assert_archive_classified("iso", "application/x-iso9660-image")

    def test_process_archive_lz(self):
        self._assert_archive_classified("lz", "application/x-lzip")

    def test_process_archive_rar(self):
        self._assert_archive_classified("rar", "application/x-rar")

    def test_process_archive_tar(self):
        self._assert_archive_classified("tar", "application/x-tar")

    def test_process_archive_udf(self):
        self._assert_archive_classified("udf", "application/x-iso9660-image")

    def test_process_archive_xz(self):
        self._assert_archive_classified("xz", "application/x-xz")

    def test_process_archive_zip(self):
        self._assert_archive_classified("zip", "application/zip")
| 32.659091
| 81
| 0.474202
| 817
| 10,059
| 5.70257
| 0.088127
| 0.050225
| 0.039064
| 0.058596
| 0.879588
| 0.730844
| 0.707448
| 0.707448
| 0.707448
| 0.707448
| 0
| 0.002943
| 0.391888
| 10,059
| 307
| 82
| 32.765472
| 0.758705
| 0
| 0
| 0.625899
| 0
| 0
| 0.18819
| 0.017
| 0
| 0
| 0
| 0
| 0.046763
| 1
| 0.05036
| false
| 0
| 0.014388
| 0
| 0.068345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60ab0deb85ecb497c28fbd8bebf809ee2ba99280
| 49,402
|
py
|
Python
|
utils/add_mendelian_annotations_refactor.py
|
jianxinwang/GenEsysV
|
444b4685b603c3da5ac9d9265817cac98c33c97e
|
[
"BSD-3-Clause"
] | 14
|
2019-02-27T13:57:36.000Z
|
2021-10-01T14:29:38.000Z
|
utils/add_mendelian_annotations_refactor.py
|
jianxinwang/GenEsysV
|
444b4685b603c3da5ac9d9265817cac98c33c97e
|
[
"BSD-3-Clause"
] | 11
|
2019-06-13T06:38:43.000Z
|
2021-06-15T13:51:39.000Z
|
utils/add_mendelian_annotations_refactor.py
|
jianxinwang/GenEsysV
|
444b4685b603c3da5ac9d9265817cac98c33c97e
|
[
"BSD-3-Clause"
] | 5
|
2019-02-27T13:57:40.000Z
|
2020-02-09T13:15:06.000Z
|
import elasticsearch
# Elasticsearch query-body templates for Mendelian-inheritance annotation.
# Each is a JSON string with printf-style placeholders filled per family:
# "%s" is the child's sample.Sample_ID (plus a gene symbol in the
# compound-het templates); "%d" pairs are POS ranges excluded from the
# X-linked queries (presumably the pseudoautosomal regions — see
# range_rules below; TODO confirm).
# Phenotype "2" = affected, "1" = unaffected by the conventions visible in
# the helper predicates further down (is_autosomal_dominant, etc.).

# Child hom-alt (1/1 or 1|1), both parents unaffected het carriers;
# VEP flavour: restrict to loss-of-function CSQ_nested.Consequence values;
# autosomes only (CHROM X/Y excluded).
autosomal_recessive_vep_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["1/1", "1|1"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"terms": {"sample.Mother_Genotype": ["0/1", "0|1", "1|0"]}},
{"terms": {"sample.Father_Genotype": ["0/1", "0|1", "1|0"]}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
# Same trio constraints as above; ANNOVAR flavour expresses the
# loss-of-function filter as a "should" clause over ExonicFunc_*/Func_*
# fields with minimum_should_match 1.
autosomal_recessive_annovar_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["1/1", "1|1"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"terms": {"sample.Mother_Genotype": ["0/1", "0|1", "1|0"]}},
{"terms": {"sample.Father_Genotype": ["0/1", "0|1", "1|0"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
# De novo: affected het child, both parents unaffected with 0/0 genotypes;
# autosomes only.
denovo_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"term": {"sample.Mother_Genotype": "0/0"}},
{"term": {"sample.Father_Genotype": "0/0"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
# Autosomal dominant candidates: affected het child with at least one
# affected parent; the exact parent genotype rules are applied afterwards
# in Python by is_autosomal_dominant().
autosomal_dominant_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}}
],
"should" :[
{"term": {"sample.Mother_Phenotype": "2"}},
{"term": {"sample.Father_Phenotype": "2"}}
],
"minimum_should_match": 1
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
# Compound het (VEP): affected het child, unaffected parents, restricted to
# LoF consequences within one gene — second "%s" is the CSQ_nested.SYMBOL.
compound_heterozygous_vep_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}},
{"term": {"CSQ_nested.SYMBOL": "%s"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
# Compound het (ANNOVAR): as above but the gene filter targets
# AAChange_refGene.Gene and LoF is a "should" clause.
compound_heterozygous_annovar_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "AAChange_refGene",
"query": {
"bool": {
"filter": [
{"term": {"AAChange_refGene.Gene": "%s"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
# X-linked dominant candidates on CHROM X; the four "%d" values are two
# excluded POS ranges (filled from range_rules); detailed genotype rules
# are applied afterwards by is_x_linked_dominant().
x_linked_dominant_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
],
"should" :[
{"term": {"sample.Mother_Phenotype": "2"}},
{"term": {"sample.Father_Phenotype": "2"}}
],
"minimum_should_match": 1
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
# X-linked recessive candidates (VEP): affected child on X with LoF
# consequences; Python-side is_x_linked_recessive() applies the rest.
x_linked_recessive_vep_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
# X-linked recessive candidates (ANNOVAR): same skeleton with the LoF
# "should" clause over ExonicFunc_*/Func_* fields.
x_linked_recessive_annovar_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
# X-linked de novo candidates: affected child on X outside the excluded
# ranges; is_x_linked_denovo() applies the parental genotype rules.
x_linked_de_novo_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
import elasticsearch
from elasticsearch import helpers
import pprint
import json
from natsort import natsorted
def is_autosomal_dominant(sample_information):
    """Return True if the child's record fits autosomal-dominant inheritance.

    Exactly one parent is affected (Phenotype '2'); the affected parent is
    heterozygous and the unaffected parent is homozygous reference.
    """
    het = ['0/1', '0|1', '1|0']
    hom_ref = ['0/0', '0|0']
    mother_pt = sample_information.get('Mother_Phenotype')
    father_pt = sample_information.get('Father_Phenotype')
    mother_gt = sample_information.get('Mother_Genotype')
    father_gt = sample_information.get('Father_Genotype')
    # Both parents affected: not treated as dominant here.
    if mother_pt == '2' and father_pt == '2':
        return False
    if mother_pt == '2':
        return mother_gt in het and father_gt in hom_ref
    if father_pt == '2':
        return mother_gt in hom_ref and father_gt in het
    return False
def is_x_linked_dominant(sample_information):
    """Return True if the child's record fits X-linked dominant inheritance.

    Sex '1' (male): any alt-carrying GT with an affected het mother and an
    unaffected hom-ref father. Sex '2' (female): het child with exactly one
    affected parent carrying the allele. Returns False otherwise.
    """
    # Both parents affected: not treated as X-linked dominant here.
    if sample_information.get('Mother_Phenotype') == '2' and sample_information.get('Father_Phenotype') == '2':
        return False
    if sample_information.get('Sex') == '1':
        # Hemizygous male: "1", "./1" etc. count as carrying the alt allele.
        if (sample_information.get('GT') in ["0/1", "0|1", "1|0", "1/1", "1|1", "1", "./1", ".|1", "1|."] and
                sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
                sample_information.get('Mother_Phenotype') == "2" and
                sample_information.get('Father_Phenotype') == "1" and
                sample_information.get('Father_Genotype') in ["0", "0/0", "0|0"]):
            return True
    elif sample_information.get('Sex') == '2':
        if sample_information.get('GT') in ["0/1", "0|1", "1|0"]:
            # Affected father transmits; mother unaffected hom-ref.
            if (sample_information.get('Mother_Genotype') in ["0/0", "0|0"] and
                    sample_information.get('Father_Genotype') in ["0/1", "0|1", "1|0", "1", "./1", ".|1", "1|."] and
                    sample_information.get('Father_Phenotype') == "2" and
                    sample_information.get('Mother_Phenotype') == "1"):
                return True
            # Affected mother transmits; father unaffected hom-ref.
            # NOTE(review): this line tests only truthiness of
            # Mother_Phenotype — the parallel branches compare against a
            # specific value; looks like a missing `== "2"` — confirm.
            elif (sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
                    sample_information.get('Father_Genotype') in ["0/0", "0|0", "0"] and
                    sample_information.get('Mother_Phenotype') and
                    sample_information.get('Father_Phenotype') == "1"):
                return True
    return False
def is_x_linked_recessive(sample_information):
    """Return True if the child's record fits X-linked recessive inheritance.

    Sex '1' (male): any non-reference GT with an unaffected het carrier
    mother. Sex '2' (female): hom-alt child, unaffected carrier mother and
    affected allele-carrying father. Returns False otherwise.
    """
    het = ["0/1", "0|1", "1|0"]
    gt = sample_information.get('GT')
    mother_is_carrier = (
        sample_information.get('Mother_Genotype') in het and
        sample_information.get('Mother_Phenotype') == "1")
    sex = sample_information.get('Sex')
    if sex == '1':
        # Hemizygous male: anything but a reference call counts.
        return gt not in ["0/0", "0|0", "0"] and mother_is_carrier
    if sex == '2':
        father_transmits = (
            sample_information.get('Father_Genotype') in ["0/1", "0|1", "1|0", "1", "./1", ".|1", "1|."] and
            sample_information.get('Father_Phenotype') == "2")
        return gt in ["1|1", "1/1"] and mother_is_carrier and father_transmits
    return False
def is_x_linked_denovo(sample_information):
    """Return True if the child's record fits an X-linked de novo variant.

    Both parents must be unaffected homozygous reference; the child carries
    the alt allele (any alt-bearing GT for males, het only for females).
    """
    hom_ref = ["0/0", "0|0", "0"]
    # Shared requirement for both sexes: unaffected hom-ref parents.
    parents_are_ref = (
        sample_information.get('Mother_Genotype') in hom_ref and
        sample_information.get('Mother_Phenotype') == "1" and
        sample_information.get('Father_Genotype') in hom_ref and
        sample_information.get('Father_Phenotype') == "1")
    gt = sample_information.get('GT')
    sex = sample_information.get('Sex')
    if sex == '1':
        return gt in ["0/1", "0|1", "1|0", "1/1", "1|1", "1"] and parents_are_ref
    if sex == '2':
        return gt in ["0/1", "0|1", "1|0"] and parents_are_ref
    return False
def get_vep_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name):
    """Return the sorted gene symbols that are compound-het candidates (VEP).

    Aggregates CSQ_nested.SYMBOL over documents with an affected het child,
    unaffected parents, loss-of-function consequences, autosomes only.
    Results are naturally sorted and empty keys dropped.
    """
    compound_heterozygous_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant", "CSQ_nested"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
},
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "CSQ_nested"
},
"aggs" : {
"values" : {"terms" : {"field" : "CSQ_nested.SYMBOL", "size" : 30000}}
}
}
}
}"""
    # size 0: only the aggregation buckets are needed, not the hits.
    results = es.search(index=index_name, doc_type=doc_type_name,
                        body=compound_heterozygous_query_body_template, request_timeout=120)
    return natsorted([ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']])
def get_annovar_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name):
    """Return the sorted gene names that are compound-het candidates (ANNOVAR).

    Same trio constraints as the VEP variant but the loss-of-function filter
    is a "should" clause over ExonicFunc_*/Func_* fields and the aggregation
    targets AAChange_refGene.Gene. Naturally sorted, empty keys dropped.
    """
    compound_heterozygous_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant", "CSQ_nested"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
},
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "AAChange_refGene"
},
"aggs" : {
"values" : {"terms" : {"field" : "AAChange_refGene.Gene", "size" : 30000}}
}
}
}
}"""
    # size 0: only the aggregation buckets are needed, not the hits.
    results = es.search(index=index_name, doc_type=doc_type_name,
                        body=compound_heterozygous_query_body_template, request_timeout=120)
    return natsorted([ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']])
def get_values_from_es(es, index_name, doc_type_name, field_es_name, field_path):
    """Return the distinct values of a field via a terms aggregation.

    field_es_name -- field to aggregate (up to 30000 buckets)
    field_path    -- nested path containing the field, or falsy for a
                     top-level (non-nested) field
    Empty bucket keys are dropped.
    """
    if not field_path:
        body_non_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"terms" : { "field" : "%s", "size" : 30000 }
}
}
}
"""
        body = body_non_nested_template % (field_es_name)
        results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
        return [ele['key'] for ele in results["aggregations"]["values"]["buckets"] if ele['key']]
    elif field_path:
        body_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "%s"
},
"aggs" : {
"values" : {"terms" : {"field" : "%s.%s", "size" : 30000}}
}
}
}
}
"""
        # Placeholders: nested path, then "path.field" for the inner terms agg.
        body = body_nested_template % (field_path,
                                       field_path,
                                       field_es_name)
        results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
        return [ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']]
def get_family_dict(es, index_name, doc_type_name):
    """Map each Family_ID in the index to its trio members.

    For every family, fetches one document whose nested sample entry has
    both a Father_ID and a Mother_ID (i.e. the child's record) and returns
    {family_id: {'father_id', 'mother_id', 'child_id', 'child_sex'}}.
    """
    family_ids = get_values_from_es(es, index_name, doc_type_name, 'Family_ID', 'sample')
    body_template = """
{
"_source": false,
"size": 1,
"query": {
"nested": {
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"must" : [{"term": { "sample.Family_ID": "%s"}},
{"exists": { "field": "sample.Father_ID"}},
{"exists": { "field": "sample.Mother_ID"}}
]
}
},
"inner_hits": {}
}
}
}
"""
    # Single initialization (the original redundantly created the dict twice).
    family_dict = {}
    for family_id in family_ids:
        body = body_template % (family_id)
        results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
        # First inner hit satisfies the exists-filters, so it carries both
        # parent IDs — the child's sample entry.
        result = results['hits']['hits'][0]['inner_hits']['sample']['hits']['hits'][0]["_source"]
        family_dict[family_id] = {
            'father_id': result.get('Father_ID'),
            'mother_id': result.get('Mother_ID'),
            'child_id': result.get('Sample_ID'),
            'child_sex': result.get('Sex'),
        }
    return family_dict
def pop_sample_with_id(sample_array, sample_id):
    """Remove and return the entry of *sample_array* whose Sample_ID matches.

    Scans the whole list, so the LAST match wins; if nothing matches, the
    first element is popped and returned (original fallback behavior —
    raises IndexError on an empty list either way).
    """
    match_index = 0
    for position, candidate in enumerate(sample_array):
        if candidate.get('Sample_ID') == sample_id:
            match_index = position
    return sample_array.pop(match_index)
def pop_sample_with_id_apply_compound_het_rules(sample_array, sample_id):
    """Pop the matching sample and keep it only if parents are discordant het.

    Removes the entry whose Sample_ID matches (last match wins; index 0 if
    none matches). The popped sample is returned only when exactly one
    parent is heterozygous and the other homozygous reference — the
    compound-het transmission pattern; otherwise returns None.
    """
    match_index = 0
    for position, candidate in enumerate(sample_array):
        if candidate.get('Sample_ID') == sample_id:
            match_index = position
    popped = sample_array.pop(match_index)

    het = ["0/1", "0|1", "1|0"]
    hom_ref = ["0/0", "0|0"]
    mother_gt = popped.get('Mother_Genotype')
    father_gt = popped.get('Father_Genotype')
    if mother_gt in het and father_gt in hom_ref:
        return popped
    if mother_gt in hom_ref and father_gt in het:
        return popped
    return None
def are_variants_compound_heterozygous(variants):
    """Return the compound-het subset of *variants*, or False if none.

    A variant qualifies when its parental genotypes carry exactly one alt
    allele between them (digit sum of both GT strings == 1). The first
    qualifying variant fixes a (father, mother) pair; any later qualifying
    variant whose pair is the exact reverse completes the compound-het
    combination. Returns the collected variants on success, else False.
    """
    pair_to_mirror = None
    collected = []
    mirror_found = False
    for variant in variants:
        father_gt = variant.get('Father_Genotype')
        mother_gt = variant.get('Mother_Genotype')
        # One alt allele across both parents, e.g. '0/1' + '0/0'.
        alt_count = sum(int(ch) for ch in father_gt + mother_gt if ch.isdigit())
        if alt_count != 1:
            continue
        if pair_to_mirror is None:
            pair_to_mirror = [father_gt, mother_gt]
            collected.append(variant)
            continue
        # Mirrored pair: the other parent contributes the allele this time.
        if pair_to_mirror == [mother_gt, father_gt]:
            collected.append(variant)
            mirror_found = True
    return collected if mirror_found else False
def annotate_autosomal_recessive(es, index_name, doc_type_name, family_dict, annotation):
    """Tag matching child samples with 'autosomal_recessive' in the index.

    For every family, scans documents matching the hom-alt-child /
    carrier-parents query (VEP or ANNOVAR flavour per *annotation*) and
    appends 'autosomal_recessive' to the child's mendelian_diseases list,
    bulk-updating documents in batches of 500.
    """
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []
        child_id = family.get('child_id')
        # print(child_id)
        if annotation == 'vep':
            query_body = autosomal_recessive_vep_query_body_template % (child_id)
        elif annotation == 'annovar':
            query_body = autosomal_recessive_annovar_query_body_template % (child_id)
        # print(query_body)
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            es_id = hit['_id']
            sample_array = hit["_source"]["sample"]
            # Take the child's entry out; it is re-appended after mutation.
            sample = pop_sample_with_id(sample_array, child_id)
            tmp_id = es_id + child_id
            mendelian_diseases = sample.get('mendelian_diseases', [])
            # Already annotated: count it once and skip the update.
            if 'autosomal_recessive' in mendelian_diseases:
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            to_update = False
            if mendelian_diseases:
                if 'autosomal_recessive' not in mendelian_diseases:
                    mendelian_diseases.append('autosomal_recessive')
                    to_update = True
            else:
                to_update = True
                sample['mendelian_diseases'] = ['autosomal_recessive']
            if tmp_id not in sample_matched:
                sample_matched.append(tmp_id)
            if to_update:
                sample_array.append(sample)
                action = {
                    "_index": index_name,
                    '_op_type': 'update',
                    "_type": doc_type_name,
                    "_id": es_id,
                    "doc": {
                        "sample": sample_array
                    }
                }
                count += 1
                actions.append(action)
                # Flush every 500 queued updates.
                if count % 500 == 0:
                    helpers.bulk(es, actions, refresh=True)
                    actions = []
        # Flush the remainder for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} autosomal_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_denovo(es, index_name, doc_type_name, family_dict):
    """Tag matching child samples with 'denovo' in the index.

    For every family, scans documents matching the de novo query (het child,
    both parents 0/0 and unaffected) and appends 'denovo' to the child's
    mendelian_diseases list, bulk-updating documents in batches of 500.
    """
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []
        child_id = family.get('child_id')
        # print(child_id)
        query_body = denovo_query_body_template % (child_id)
        # print(query_body)
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            es_id = hit['_id']
            sample_array = hit["_source"]["sample"]
            # Take the child's entry out; it is re-appended after mutation.
            sample = pop_sample_with_id(sample_array, child_id)
            tmp_id = es_id + child_id
            mendelian_diseases = sample.get('mendelian_diseases', [])
            # Already annotated: count it once and skip the update.
            if 'denovo' in mendelian_diseases:
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            to_update = False
            if mendelian_diseases:
                if 'denovo' not in mendelian_diseases:
                    mendelian_diseases.append('denovo')
                    print(es_id, mendelian_diseases)
                    to_update = True
            else:
                sample['mendelian_diseases'] = ['denovo']
                to_update = True
            if tmp_id not in sample_matched:
                sample_matched.append(tmp_id)
            if to_update:
                sample_array.append(sample)
                action = {
                    "_index": index_name,
                    '_op_type': 'update',
                    "_type": doc_type_name,
                    "_id": es_id,
                    "doc": {
                        "sample": sample_array
                    }
                }
                count += 1
                actions.append(action)
                # Flush every 500 queued updates.
                if count % 500 == 0:
                    helpers.bulk(es, actions, refresh=True)
                    actions = []
        # Flush the remainder for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} denovo samples'.format(len(list(set(sample_matched)))))
def annotate_autosomal_dominant(es, index_name, doc_type_name, family_dict):
    """Tag matching child samples with 'autosomal_dominant' in the index.

    For every family, scans candidate documents (het affected child, at
    least one affected parent) and applies the precise genotype rules via
    is_autosomal_dominant() before appending 'autosomal_dominant' to the
    child's mendelian_diseases list; bulk-updates in batches of 500.
    """
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []
        child_id = family.get('child_id')
        # print(child_id)
        query_body = autosomal_dominant_query_body_template % (child_id)
        # print(query_body)
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            # pprint.pprint(hit["_source"])
            es_id = hit['_id']
            sample_array = hit["_source"]["sample"]
            # Take the child's entry out; it is re-appended after mutation.
            sample = pop_sample_with_id(sample_array, child_id)
            mendelian_diseases = sample.get('mendelian_diseases', [])
            tmp_id = es_id + child_id
            # Already annotated: count it once and skip the update.
            if 'autosomal_dominant' in mendelian_diseases:
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            # Genotype rules the ES query cannot fully express.
            if is_autosomal_dominant(sample):
                to_update = False
                if mendelian_diseases:
                    if 'autosomal_dominant' not in mendelian_diseases:
                        mendelian_diseases.append('autosomal_dominant')
                        print(es_id, mendelian_diseases)
                        to_update = True
                else:
                    sample['mendelian_diseases'] = ['autosomal_dominant']
                    to_update = True
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                if to_update:
                    sample_array.append(sample)
                    action = {
                        "_index": index_name,
                        '_op_type': 'update',
                        "_type": doc_type_name,
                        "_id": es_id,
                        "doc": {
                            "sample": sample_array
                        }
                    }
                    count += 1
                    actions.append(action)
                    # Flush every 500 queued updates.
                    if count % 500 == 0:
                        helpers.bulk(es, actions, refresh=True)
                        actions = []
        # Flush the remainder for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} autosomal dominant samples'.format(len(list(set(sample_matched)))))
# POS ranges on chromosome X excluded by the X-linked query templates'
# must_not range clauses, keyed by reference build. Presumably the
# pseudoautosomal regions (PAR1, PAR2) — TODO confirm.
# (A stray no-op tuple expression "24, 382, 427" that followed this dict
# was removed; it had no effect at runtime.)
range_rules = {
    'hg19/GRCh37': ([60001, 2699520], [154931044, 155260560]),
    'hg38/GRCh38': ([10001, 2781479], [155701383, 156030895])
}
def annotate_x_linked_dominant(es, index_name, doc_type_name, family_dict):
    """Tag matching child samples with 'x_linked_dominant' in the index.

    For every family, scans chromosome-X candidates (excluding the
    range_rules['hg19/GRCh37'] POS ranges — note the build is hard-coded)
    and applies is_x_linked_dominant() before appending 'x_linked_dominant'
    to the child's mendelian_diseases list; bulk-updates in batches of 500.
    """
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []
        child_id = family.get('child_id')
        # print(child_id)
        # Template placeholders: child sample ID plus the two excluded POS
        # ranges (gt/lt pairs) for the hg19/GRCh37 build.
        query_body = x_linked_dominant_query_body_template % (
            child_id,
            range_rules['hg19/GRCh37'][0][0],
            range_rules['hg19/GRCh37'][0][1],
            range_rules['hg19/GRCh37'][1][0],
            range_rules['hg19/GRCh37'][1][1])
        # print(query_body)
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            # pprint.pprint(hit["_source"])
            es_id = hit['_id']
            # print(es_id)
            sample_array = hit["_source"]["sample"]
            # Take the child's entry out; it is re-appended after mutation.
            sample = pop_sample_with_id(sample_array, child_id)
            tmp_id = es_id + child_id
            mendelian_diseases = sample.get('mendelian_diseases', [])
            # Already annotated: count it once and skip the update.
            if 'x_linked_dominant' in mendelian_diseases:
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            # Genotype rules the ES query cannot fully express.
            if is_x_linked_dominant(sample):
                to_update = False
                if mendelian_diseases:
                    if 'x_linked_dominant' not in mendelian_diseases:
                        mendelian_diseases.append('x_linked_dominant')
                        print(es_id, mendelian_diseases)
                        to_update = True
                else:
                    sample['mendelian_diseases'] = ['x_linked_dominant']
                    to_update = True
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                if to_update:
                    sample_array.append(sample)
                    action = {
                        "_index": index_name,
                        '_op_type': 'update',
                        "_type": doc_type_name,
                        "_id": es_id,
                        "doc": {
                            "sample": sample_array
                        }
                    }
                    count += 1
                    actions.append(action)
                    # Flush every 500 queued updates.
                    if count % 500 == 0:
                        helpers.bulk(es, actions, refresh=True)
                        actions = []
        # Flush the remainder for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} x_linked_dominant samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_recessive(es, index_name, doc_type_name, family_dict, annotation):
    """Tag child samples that match the X-linked recessive inheritance pattern.

    For each family, scans Elasticsearch for variants inside the hg19/GRCh37
    chrX intervals from ``range_rules`` and, for every hit whose child sample
    passes ``is_x_linked_recessive``, appends ``'x_linked_recessive'`` to
    that sample's ``mendelian_diseases`` list and bulk-updates the document.

    Args:
        es: Elasticsearch client.
        index_name: index to scan and update.
        doc_type_name: document type within the index.
        family_dict: mapping of family_id -> family info dict; only the
            ``'child_id'`` entry is used here.
        annotation: annotation pipeline that produced the index, ``'vep'``
            or ``'annovar'``; selects the query template.

    Raises:
        ValueError: if ``annotation`` is not ``'vep'`` or ``'annovar'``.
            (Previously an unknown value surfaced later as a confusing
            NameError on the unbound query body.)
    """
    # The template choice is loop-invariant: pick it once and fail fast on
    # an unsupported annotation value.
    if annotation == 'vep':
        query_template = x_linked_recessive_vep_query_body_template
    elif annotation == 'annovar':
        query_template = x_linked_recessive_annovar_query_body_template
    else:
        raise ValueError(
            "annotation must be 'vep' or 'annovar', got {!r}".format(annotation))
    # De-duplicated "<es_id><child_id>" keys; only used for the final count.
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []  # buffered bulk-update actions, flushed every 500
        child_id = family.get('child_id')
        query_body = query_template % (
            child_id,
            range_rules['hg19/GRCh37'][0][0],
            range_rules['hg19/GRCh37'][0][1],
            range_rules['hg19/GRCh37'][1][0],
            range_rules['hg19/GRCh37'][1][1]
        )
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            es_id = hit['_id']
            sample_array = hit["_source"]["sample"]
            # Remove the child's entry from the array; it is re-appended
            # below only when the document actually needs updating.
            sample = pop_sample_with_id(sample_array, child_id)
            tmp_id = es_id + child_id  # unique (document, sample) key
            mendelian_diseases = sample.get('mendelian_diseases', [])
            if 'x_linked_recessive' in mendelian_diseases:
                # Already annotated on a previous run: count it and move on.
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            if is_x_linked_recessive(sample):
                to_update = False
                if mendelian_diseases:
                    if 'x_linked_recessive' not in mendelian_diseases:
                        mendelian_diseases.append('x_linked_recessive')
                        print(es_id, mendelian_diseases)
                        to_update = True
                else:
                    sample['mendelian_diseases'] = ['x_linked_recessive']
                    to_update = True
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                if to_update:
                    sample_array.append(sample)
                    action = {
                        "_index": index_name,
                        '_op_type': 'update',
                        "_type": doc_type_name,
                        "_id": es_id,
                        "doc": {
                            "sample": sample_array
                        }
                    }
                    count += 1
                    actions.append(action)
                    # Flush in batches of 500 to bound memory use.
                    if count % 500 == 0:
                        helpers.bulk(es, actions, refresh=True)
                        actions = []
        # Flush whatever is left for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} x_linked_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_denovo(es, index_name, doc_type_name, family_dict):
    """Tag child samples that match the X-linked de novo inheritance pattern.

    Mirrors ``annotate_x_linked_dominant``: for each family, scans
    Elasticsearch for variants inside the hg19/GRCh37 chrX intervals from
    ``range_rules`` and, for every hit whose child sample passes
    ``is_x_linked_denovo``, appends ``'x_linked_denovo'`` to that sample's
    ``mendelian_diseases`` list and bulk-updates the document.

    Args:
        es: Elasticsearch client.
        index_name: index to scan and update.
        doc_type_name: document type within the index.
        family_dict: mapping of family_id -> family info dict; only the
            ``'child_id'`` entry is used here.
    """
    # De-duplicated "<es_id><child_id>" keys; only used for the final count.
    sample_matched = []
    for family_id, family in family_dict.items():
        count = 0
        actions = []  # buffered bulk-update actions, flushed every 500
        child_id = family.get('child_id')
        query_body = x_linked_de_novo_query_body_template % (
            child_id,
            range_rules['hg19/GRCh37'][0][0],
            range_rules['hg19/GRCh37'][0][1],
            range_rules['hg19/GRCh37'][1][0],
            range_rules['hg19/GRCh37'][1][1])
        query_body = json.loads(query_body)
        for hit in helpers.scan(
                es,
                query=query_body,
                scroll=u'5m',
                size=1000,
                preserve_order=False,
                index=index_name,
                doc_type=doc_type_name):
            es_id = hit['_id']
            sample_array = hit["_source"]["sample"]
            # Remove the child's entry from the array; it is re-appended
            # below only when the document actually needs updating.
            sample = pop_sample_with_id(sample_array, child_id)
            tmp_id = es_id + child_id  # unique (document, sample) key
            mendelian_diseases = sample.get('mendelian_diseases', [])
            if 'x_linked_denovo' in mendelian_diseases:
                # Already annotated on a previous run: count it and move on.
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                continue
            if is_x_linked_denovo(sample):
                # NOTE: a redundant re-fetch of 'mendelian_diseases' and a
                # duplicate tmp_id computation were removed here; both were
                # no-ops and the body now matches the sibling annotators.
                to_update = False
                if mendelian_diseases:
                    if 'x_linked_denovo' not in mendelian_diseases:
                        mendelian_diseases.append('x_linked_denovo')
                        print(es_id, mendelian_diseases)
                        to_update = True
                else:
                    sample['mendelian_diseases'] = ['x_linked_denovo']
                    to_update = True
                if tmp_id not in sample_matched:
                    sample_matched.append(tmp_id)
                if to_update:
                    sample_array.append(sample)
                    action = {
                        "_index": index_name,
                        '_op_type': 'update',
                        "_type": doc_type_name,
                        "_id": es_id,
                        "doc": {
                            "sample": sample_array
                        }
                    }
                    count += 1
                    actions.append(action)
                    # Flush in batches of 500 to bound memory use.
                    if count % 500 == 0:
                        helpers.bulk(es, actions, refresh=True)
                        actions = []
        # Flush whatever is left for this family.
        helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} x_linked_denovo samples'.format(len(list(set(sample_matched)))))
def annotate_compound_heterozygous(es, index_name, doc_type_name, family_dict, annotation):
    """Tag child samples whose variants are compound heterozygous within a gene.

    For every (family, gene) pair, collects the child's candidate variant
    samples for that gene; when at least two of them together satisfy
    ``are_variants_compound_heterozygous``, appends
    ``'compound_heterozygous'`` to each sample's ``mendelian_diseases`` list
    and bulk-updates the corresponding documents.

    Args:
        es: Elasticsearch client.
        index_name: index to scan and update.
        doc_type_name: document type within the index.
        family_dict: mapping of family_id -> family info dict; only the
            ``'child_id'`` entry is used here.
        annotation: annotation pipeline that produced the index, ``'vep'``
            or ``'annovar'``; selects the gene source and query template.

    Raises:
        ValueError: if ``annotation`` is not ``'vep'`` or ``'annovar'``.
    """
    # The gene list and query template do not depend on the family, so fetch
    # them once up front (previously the gene list was re-scanned from ES for
    # every family); also fail fast on an unsupported annotation value.
    if annotation == 'vep':
        genes = get_vep_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name)
        query_template = compound_heterozygous_vep_query_body_template
    elif annotation == 'annovar':
        genes = get_annovar_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name)
        query_template = compound_heterozygous_annovar_query_body_template
    else:
        raise ValueError(
            "annotation must be 'vep' or 'annovar', got {!r}".format(annotation))
    # De-duplicated "<es_id><child_id>" keys; only used for the final count.
    sample_matched = []
    for family_id, family in family_dict.items():
        child_id = family.get('child_id')
        for gene in genes:
            query_body = json.loads(query_template % (child_id, gene))
            samples = []
            # Gather every candidate variant sample of this child in this gene.
            for hit in helpers.scan(
                    es,
                    query=query_body,
                    scroll=u'5m',
                    size=1000,
                    preserve_order=False,
                    index=index_name,
                    doc_type=doc_type_name):
                es_id = hit['_id']
                sample_array = hit["_source"]["sample"]
                sample = pop_sample_with_id_apply_compound_het_rules(sample_array, child_id)
                if not sample:
                    continue
                sample.update({'es_id': es_id})
                samples.append(sample)
            actions = []
            count = 0
            # Compound het needs at least two qualifying variants in the gene.
            if len(samples) > 1 and are_variants_compound_heterozygous(samples):
                for sample in samples:
                    es_id = sample.pop("es_id")
                    # Re-read the current document before mutating it.
                    es_document = es.get(index_name, doc_type_name, es_id)
                    sample_array = es_document["_source"]["sample"]
                    sample = pop_sample_with_id(sample_array, child_id)
                    mendelian_diseases = sample.get('mendelian_diseases', [])
                    tmp_id = es_id + child_id  # unique (document, sample) key
                    if 'compound_heterozygous' in mendelian_diseases:
                        # Already annotated on a previous run: count and skip.
                        if tmp_id not in sample_matched:
                            sample_matched.append(tmp_id)
                        continue
                    to_update = False
                    if mendelian_diseases:
                        if 'compound_heterozygous' not in mendelian_diseases:
                            mendelian_diseases.append('compound_heterozygous')
                            print(es_id, mendelian_diseases)
                            to_update = True
                    else:
                        sample['mendelian_diseases'] = ['compound_heterozygous']
                        to_update = True
                    if tmp_id not in sample_matched:
                        sample_matched.append(tmp_id)
                    if to_update:
                        sample_array.append(sample)
                        action = {
                            "_index": index_name,
                            '_op_type': 'update',
                            "_type": doc_type_name,
                            "_id": es_id,
                            "doc": {
                                "sample": sample_array
                            }
                        }
                        count += 1
                        actions.append(action)
                        # Flush in batches of 500 to bound memory use.
                        if count % 500 == 0:
                            helpers.bulk(es, actions, refresh=True)
                            actions = []
            # Flush whatever is left for this gene.
            helpers.bulk(es, actions, refresh=True)
    es.indices.refresh(index_name)
    es.cluster.health(wait_for_no_relocating_shards=True)
    print('Found {} compound_heterozygous samples'.format(len(list(set(sample_matched)))))
def main():
    """Run every Mendelian-inheritance annotation pass over one ES index,
    printing per-pass and total wall-clock timings."""
    import datetime

    index_name = "ashkenazitrio4families"
    doc_type_name = "ashkenazitrio4families_"
    annotation = 'vep'
    es = elasticsearch.Elasticsearch(host='199.109.192.181', port=9200)
    family_dict = get_family_dict(es, index_name, doc_type_name)
    pprint.pprint(family_dict)

    all_start_time = datetime.datetime.now()
    # (step name, zero-arg callable) pairs, executed in order.
    steps = [
        ('annotate_autosomal_recessive',
         lambda: annotate_autosomal_recessive(es, index_name, doc_type_name, family_dict, annotation)),
        ('annotate_denovo',
         lambda: annotate_denovo(es, index_name, doc_type_name, family_dict)),
        ('annotate_autosomal_dominant',
         lambda: annotate_autosomal_dominant(es, index_name, doc_type_name, family_dict)),
        ('annotate_x_linked_dominant',
         lambda: annotate_x_linked_dominant(es, index_name, doc_type_name, family_dict)),
        ('annotate_x_linked_recessive',
         lambda: annotate_x_linked_recessive(es, index_name, doc_type_name, family_dict, annotation)),
        ('annotate_x_linked_denovo',
         lambda: annotate_x_linked_denovo(es, index_name, doc_type_name, family_dict)),
        ('annotate_compound_heterozygous',
         lambda: annotate_compound_heterozygous(es, index_name, doc_type_name, family_dict, annotation)),
    ]
    for step_name, run_step in steps:
        start_time = datetime.datetime.now()
        print('Starting ' + step_name, start_time)
        run_step()
        elapsed = int((datetime.datetime.now() - start_time).total_seconds())
        print('Finished ' + step_name, elapsed, 'seconds')

    total = int((datetime.datetime.now() - all_start_time).total_seconds())
    print('Finished annotating all in ', total, 'seconds')


if __name__ == "__main__":
    main()
| 35.772629
| 211
| 0.467977
| 4,617
| 49,402
| 4.709335
| 0.047433
| 0.02525
| 0.044152
| 0.026491
| 0.883917
| 0.861611
| 0.838017
| 0.829232
| 0.808214
| 0.782459
| 0
| 0.020529
| 0.398526
| 49,402
| 1,380
| 212
| 35.798551
| 0.711214
| 0.011153
| 0
| 0.675087
| 0
| 0.021777
| 0.421131
| 0.052286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016551
| false
| 0
| 0.006098
| 0
| 0.045296
| 0.026132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60ade96cc4d598a5845e7805f15603b8aacbcd4d
| 48
|
py
|
Python
|
tello/__init__.py
|
gengliangyu2008/Intelligent-Navigation-Systems
|
b978ea48d8e8f5fe61a272c74741a9693eb1a997
|
[
"MIT"
] | null | null | null |
tello/__init__.py
|
gengliangyu2008/Intelligent-Navigation-Systems
|
b978ea48d8e8f5fe61a272c74741a9693eb1a997
|
[
"MIT"
] | null | null | null |
tello/__init__.py
|
gengliangyu2008/Intelligent-Navigation-Systems
|
b978ea48d8e8f5fe61a272c74741a9693eb1a997
|
[
"MIT"
] | null | null | null |
from .utils import *
from .TelloClient import *
| 16
| 26
| 0.75
| 6
| 48
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 27
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7162605fcea12712e24d3836a72b505726184800
| 24
|
py
|
Python
|
videos/082_avoiding_import_loops_in_python/example3-subpackage-init/fix2/mypkg/subpkg_b/__init__.py
|
matthewstidham/VideosSampleCode
|
122edfbf5a805930bb031dad584e79a6c487ba7a
|
[
"MIT"
] | 1
|
2022-02-16T05:24:12.000Z
|
2022-02-16T05:24:12.000Z
|
videos/082_avoiding_import_loops_in_python/example3-subpackage-init/broken/mypkg/subpkg_b/__init__.py
|
bandirevanth/VideosSampleCode
|
9eec145ac6fffb11d2a51356fb65d74ecd721d50
|
[
"MIT"
] | null | null | null |
videos/082_avoiding_import_loops_in_python/example3-subpackage-init/broken/mypkg/subpkg_b/__init__.py
|
bandirevanth/VideosSampleCode
|
9eec145ac6fffb11d2a51356fb65d74ecd721d50
|
[
"MIT"
] | null | null | null |
from .module_b import B
| 12
| 23
| 0.791667
| 5
| 24
| 3.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
718213a1e19f1c828f3dd016c2d86381f08c66db
| 2,714
|
py
|
Python
|
backend/app/controllers/news_feed_controller.py
|
2110521-2563-1-Software-Architecture/TBD-Project
|
7854c951a631b6f3667af7c50e017a3652fd15a9
|
[
"MIT"
] | 1
|
2020-10-26T15:38:58.000Z
|
2020-10-26T15:38:58.000Z
|
backend/app/controllers/news_feed_controller.py
|
2110521-2563-1-Software-Architecture/TBD-Project
|
7854c951a631b6f3667af7c50e017a3652fd15a9
|
[
"MIT"
] | null | null | null |
backend/app/controllers/news_feed_controller.py
|
2110521-2563-1-Software-Architecture/TBD-Project
|
7854c951a631b6f3667af7c50e017a3652fd15a9
|
[
"MIT"
] | null | null | null |
from app.controllers.base import Controller
from app.models.news_feed import NewsFeed
from app.models.user import User
import json
class NewsFeedController(Controller):
    """HTTP controller exposing news-feed endpoints (get/create/update/delete/interact).

    Every handler resolves the acting user from the ``User`` request header,
    delegates to the ``NewsFeed`` model, and writes the JSON result back.
    If anything fails, a generic 'Bad Request' payload is written instead.

    Fix: the handlers previously used bare ``except:`` clauses, which also
    swallow SystemExit/KeyboardInterrupt; they now catch ``Exception`` only.
    """

    @staticmethod
    def _rejected_response():
        # Fresh dict per call so callers can never mutate a shared instance.
        return {'status': 'Bad Request.', 'reason': 'Controller rejected.'}

    async def _current_user(self, request):
        """Resolve the acting user from the ``User`` request header."""
        return await User(request.app).get_user(request.headers.get('User'))

    async def get(self, request):
        """Return the news-feed page named by the ``page`` request header."""
        try:
            current_user = await self._current_user(request)
            response = await NewsFeed(request.app).get_news_feed(
                current_user, json.loads(request.headers.get('page')))
            await self.write(request, self.json_response(response))
        except Exception:
            await self.write(request, self.json_response(self._rejected_response()))

    async def create(self, request):
        """Create a news-feed entry from the JSON request body."""
        try:
            payload = await request.json()
            current_user = await self._current_user(request)
            response = await NewsFeed(request.app).create(current_user, **payload)
            await self.write(request, self.json_response(response))
        except Exception:
            await self.write(request, self.json_response(self._rejected_response()))

    async def update(self, request):
        """Update a news-feed entry from the JSON request body."""
        try:
            payload = await request.json()
            current_user = await self._current_user(request)
            response = await NewsFeed(request.app).update(current_user, **payload)
            await self.write(request, self.json_response(response))
        except Exception:
            await self.write(request, self.json_response(self._rejected_response()))

    async def delete(self, request):
        """Delete the news-feed entry named by the ``target`` request header."""
        try:
            current_user = await self._current_user(request)
            news_feed_id = request.headers.get('target')
            response = await NewsFeed(request.app).delete(current_user, news_feed_id)
            await self.write(request, self.json_response(response))
        except Exception:
            await self.write(request, self.json_response(self._rejected_response()))

    async def interact(self, request):
        """Record an interaction (from the JSON body) on a news-feed entry."""
        try:
            payload = await request.json()
            current_user = await self._current_user(request)
            response = await NewsFeed(request.app).interact(current_user, **payload)
            await self.write(request, self.json_response(response))
        except Exception:
            await self.write(request, self.json_response(self._rejected_response()))
| 48.464286
| 88
| 0.638909
| 307
| 2,714
| 5.543974
| 0.127036
| 0.06463
| 0.082256
| 0.123384
| 0.816686
| 0.798472
| 0.798472
| 0.798472
| 0.798472
| 0.798472
| 0
| 0
| 0.240236
| 2,714
| 56
| 89
| 48.464286
| 0.825412
| 0
| 0
| 0.66
| 0
| 0
| 0.092081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71d34f5cbc5b2d0dd14b66843949ec2bcf950aa6
| 8,023
|
py
|
Python
|
dfirtrack_config/tests/system/test_system_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 273
|
2018-04-18T22:09:15.000Z
|
2021-06-04T09:15:48.000Z
|
dfirtrack_config/tests/system/test_system_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 75
|
2018-08-31T11:05:37.000Z
|
2021-06-08T14:15:07.000Z
|
dfirtrack_config/tests/system/test_system_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 61
|
2018-11-12T22:55:48.000Z
|
2021-06-06T15:16:16.000Z
|
from django.test import TestCase
from dfirtrack_config.forms import SystemExporterSpreadsheetXlsConfigForm
class SystemExporterSpreadsheetXlsConfigFormTestCase(TestCase):
    """system exporter spreadsheet XLS config form tests

    The 22 label tests all followed the same build-form-then-compare shape;
    that shape now lives in the ``_assert_label`` helper. Every public test
    method name is unchanged so test discovery and CI reports are unaffected.
    """

    def _assert_label(self, field_name, expected_label):
        """Build an unbound form and assert one field's label text."""
        form = SystemExporterSpreadsheetXlsConfigForm()
        self.assertEqual(form.fields[field_name].label, expected_label)

    def test_system_exporter_spreadsheet_xls_config_spread_xls_system_id_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_system_id', 'Export system ID')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_dnsname_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_dnsname', 'Export DNS name')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_domain_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_domain', 'Export domain')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_systemstatus_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_systemstatus', 'Export systemstatus')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_analysisstatus_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_analysisstatus', 'Export analysisstatus')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_reason_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_reason', 'Export reason')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_recommendation_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_recommendation', 'Export recommendation')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_systemtype_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_systemtype', 'Export systemtype')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_ip_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_ip', 'Export IP')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_os_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_os', 'Export OS')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_company_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_company', 'Export company')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_location_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_location', 'Export location')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_serviceprovider_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_serviceprovider', 'Export serviceprovider')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_tag_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_tag', 'Export tag')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_case_form_label(self):
        """test form label"""
        self._assert_label('spread_xls_case', 'Export case')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_system_create_time_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_system_create_time', 'Export system create time')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_system_modify_time_form_label(
        self,
    ):
        """test form label"""
        self._assert_label('spread_xls_system_modify_time', 'Export system modify time')

    def test_system_exporter_spreadsheet_xls_config_spread_xls_worksheet_systemstatus_form_label(
        self,
    ):
        """test form label"""
        self._assert_label(
            'spread_xls_worksheet_systemstatus',
            'Export worksheet to explain systemstatus',
        )

    def test_system_exporter_spreadsheet_xls_config_spread_xls_worksheet_analysisstatus_form_label(
        self,
    ):
        """test form label"""
        self._assert_label(
            'spread_xls_worksheet_analysisstatus',
            'Export worksheet to explain analysisstatus',
        )

    def test_system_exporter_spreadsheet_xls_config_spread_xls_worksheet_reason_form_label(
        self,
    ):
        """test form label"""
        self._assert_label(
            'spread_xls_worksheet_reason',
            'Export worksheet to explain reason',
        )

    def test_system_exporter_spreadsheet_xls_config_spread_xls_worksheet_recommendation_form_label(
        self,
    ):
        """test form label"""
        self._assert_label(
            'spread_xls_worksheet_recommendation',
            'Export worksheet to explain recommendation',
        )

    def test_system_exporter_spreadsheet_xls_config_spread_xls_worksheet_tag_form_label(
        self,
    ):
        """test form label"""
        self._assert_label(
            'spread_xls_worksheet_tag',
            'Export worksheet to explain tag',
        )

    def test_system_exporter_spreadsheet_xls_config_form_empty(self):
        """test minimum form requirements / VALID"""
        # An empty data dict must validate: every field is optional.
        form = SystemExporterSpreadsheetXlsConfigForm(data={})
        self.assertTrue(form.is_valid())
| 31.964143
| 99
| 0.670447
| 770
| 8,023
| 6.605195
| 0.074026
| 0.077861
| 0.117971
| 0.132127
| 0.844278
| 0.809477
| 0.801219
| 0.801219
| 0.791978
| 0.693669
| 0
| 0
| 0.251278
| 8,023
| 250
| 100
| 32.092
| 0.846679
| 0.109435
| 0
| 0.473282
| 0
| 0
| 0.137556
| 0.047571
| 0
| 0
| 0
| 0
| 0.175573
| 1
| 0.175573
| false
| 0
| 0.015267
| 0
| 0.198473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71d7ddd3675caca3cd45a34b9a83fc5a5ecb32f3
| 59,956
|
py
|
Python
|
pytamil/தமிழ்/codegen/வெண்பாLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 45
|
2019-09-08T14:11:21.000Z
|
2021-09-21T16:18:26.000Z
|
pytamil/தமிழ்/codegen/வெண்பாLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 2
|
2019-12-07T13:51:41.000Z
|
2021-05-14T06:08:34.000Z
|
pytamil/தமிழ்/codegen/வெண்பாLexer.py
|
srix/pytamil
|
cfcaee618bb947242f3a9a4432c9e9a9b9a5bab0
|
[
"MIT"
] | 9
|
2019-09-08T15:41:30.000Z
|
2021-02-13T07:09:24.000Z
|
# Generated from /home/srix/workspace/pytamil/pytamil/தமிழ்/resources/வெண்பா.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u00fa")
buf.write("\u04bc\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\4\u00c0\t\u00c0\4\u00c1\t\u00c1\4\u00c2\t\u00c2")
buf.write("\4\u00c3\t\u00c3\4\u00c4\t\u00c4\4\u00c5\t\u00c5\4\u00c6")
buf.write("\t\u00c6\4\u00c7\t\u00c7\4\u00c8\t\u00c8\4\u00c9\t\u00c9")
buf.write("\4\u00ca\t\u00ca\4\u00cb\t\u00cb\4\u00cc\t\u00cc\4\u00cd")
buf.write("\t\u00cd\4\u00ce\t\u00ce\4\u00cf\t\u00cf\4\u00d0\t\u00d0")
buf.write("\4\u00d1\t\u00d1\4\u00d2\t\u00d2\4\u00d3\t\u00d3\4\u00d4")
buf.write("\t\u00d4\4\u00d5\t\u00d5\4\u00d6\t\u00d6\4\u00d7\t\u00d7")
buf.write("\4\u00d8\t\u00d8\4\u00d9\t\u00d9\4\u00da\t\u00da\4\u00db")
buf.write("\t\u00db\4\u00dc\t\u00dc\4\u00dd\t\u00dd\4\u00de\t\u00de")
buf.write("\4\u00df\t\u00df\4\u00e0\t\u00e0\4\u00e1\t\u00e1\4\u00e2")
buf.write("\t\u00e2\4\u00e3\t\u00e3\4\u00e4\t\u00e4\4\u00e5\t\u00e5")
buf.write("\4\u00e6\t\u00e6\4\u00e7\t\u00e7\4\u00e8\t\u00e8\4\u00e9")
buf.write("\t\u00e9\4\u00ea\t\u00ea\4\u00eb\t\u00eb\4\u00ec\t\u00ec")
buf.write("\4\u00ed\t\u00ed\4\u00ee\t\u00ee\4\u00ef\t\u00ef\4\u00f0")
buf.write("\t\u00f0\4\u00f1\t\u00f1\4\u00f2\t\u00f2\4\u00f3\t\u00f3")
buf.write("\4\u00f4\t\u00f4\4\u00f5\t\u00f5\4\u00f6\t\u00f6\4\u00f7")
buf.write("\t\u00f7\4\u00f8\t\u00f8\4\u00f9\t\u00f9\3\2\3\2\3\3\3")
buf.write("\3\3\3\3\4\3\4\3\4\3\5\3\5\3\5\3\6\3\6\3\6\3\7\3\7\3\7")
buf.write("\3\b\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n\3\13\3\13\3\13\3")
buf.write("\f\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3")
buf.write("\20\3\20\3\20\3\21\3\21\3\21\3\22\3\22\3\22\3\23\3\23")
buf.write("\3\23\3\24\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27\3\30")
buf.write("\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\33\3\34\3\34\3\34")
buf.write("\3\35\3\35\3\35\3\36\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3")
buf.write("!\3!\3\"\3\"\3\"\3#\3#\3#\3$\3$\3%\3%\3%\3&\3&\3&\3\'")
buf.write("\3\'\3\'\3(\3(\3(\3)\3)\3*\3*\3*\3+\3+\3+\3,\3,\3,\3-")
buf.write("\3-\3-\3.\3.\3/\3/\3/\3\60\3\60\3\60\3\61\3\61\3\61\3")
buf.write("\62\3\62\3\62\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\65")
buf.write("\3\66\3\66\3\66\3\67\3\67\3\67\38\38\39\39\39\3:\3:\3")
buf.write(":\3;\3;\3;\3<\3<\3<\3=\3=\3>\3>\3>\3?\3?\3?\3@\3@\3@\3")
buf.write("A\3A\3A\3B\3B\3C\3C\3C\3D\3D\3D\3E\3E\3E\3F\3F\3F\3G\3")
buf.write("G\3H\3H\3H\3I\3I\3I\3J\3J\3J\3K\3K\3K\3L\3L\3M\3M\3M\3")
buf.write("N\3N\3N\3O\3O\3O\3P\3P\3P\3Q\3Q\3R\3R\3R\3S\3S\3S\3T\3")
buf.write("T\3T\3U\3U\3U\3V\3V\3W\3W\3W\3X\3X\3X\3Y\3Y\3Y\3Z\3Z\3")
buf.write("Z\3[\3[\3\\\3\\\3\\\3]\3]\3]\3^\3^\3^\3_\3_\3_\3`\3`\3")
buf.write("a\3a\3a\3b\3b\3b\3c\3c\3c\3d\3d\3d\3e\3e\3f\3f\3f\3g\3")
buf.write("g\3g\3h\3h\3h\3i\3i\3i\3j\3j\3k\3k\3k\3l\3l\3l\3m\3m\3")
buf.write("m\3n\3n\3n\3o\3o\3p\3p\3p\3q\3q\3q\3r\3r\3r\3s\3s\3s\3")
buf.write("t\3t\3u\3u\3v\3v\3w\3w\3x\3x\3y\3y\3z\3z\3z\3{\3{\3{\3")
buf.write("|\3|\3|\3}\3}\3}\3~\3~\3~\3\177\3\177\3\177\3\u0080\3")
buf.write("\u0080\3\u0080\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082")
buf.write("\3\u0082\3\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086\3\u0087")
buf.write("\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0089\3\u0089")
buf.write("\3\u0089\3\u008a\3\u008a\3\u008a\3\u008b\3\u008b\3\u008b")
buf.write("\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d\3\u008d\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008f\3\u008f\3\u008f\3\u0090\3\u0090")
buf.write("\3\u0090\3\u0091\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0093\3\u0093\3\u0093\3\u0094\3\u0094\3\u0094\3\u0095")
buf.write("\3\u0095\3\u0095\3\u0096\3\u0096\3\u0096\3\u0097\3\u0097")
buf.write("\3\u0097\3\u0098\3\u0098\3\u0098\3\u0099\3\u0099\3\u0099")
buf.write("\3\u009a\3\u009a\3\u009a\3\u009b\3\u009b\3\u009b\3\u009c")
buf.write("\3\u009c\3\u009c\3\u009d\3\u009d\3\u009d\3\u009e\3\u009e")
buf.write("\3\u009e\3\u009f\3\u009f\3\u009f\3\u00a0\3\u00a0\3\u00a0")
buf.write("\3\u00a1\3\u00a1\3\u00a1\3\u00a2\3\u00a2\3\u00a2\3\u00a3")
buf.write("\3\u00a3\3\u00a3\3\u00a4\3\u00a4\3\u00a4\3\u00a5\3\u00a5")
buf.write("\3\u00a5\3\u00a6\3\u00a6\3\u00a6\3\u00a7\3\u00a7\3\u00a7")
buf.write("\3\u00a8\3\u00a8\3\u00a8\3\u00a9\3\u00a9\3\u00a9\3\u00aa")
buf.write("\3\u00aa\3\u00aa\3\u00ab\3\u00ab\3\u00ab\3\u00ac\3\u00ac")
buf.write("\3\u00ac\3\u00ad\3\u00ad\3\u00ad\3\u00ae\3\u00ae\3\u00ae")
buf.write("\3\u00af\3\u00af\3\u00af\3\u00b0\3\u00b0\3\u00b0\3\u00b1")
buf.write("\3\u00b1\3\u00b1\3\u00b2\3\u00b2\3\u00b2\3\u00b3\3\u00b3")
buf.write("\3\u00b3\3\u00b4\3\u00b4\3\u00b4\3\u00b5\3\u00b5\3\u00b5")
buf.write("\3\u00b6\3\u00b6\3\u00b6\3\u00b7\3\u00b7\3\u00b7\3\u00b8")
buf.write("\3\u00b8\3\u00b8\3\u00b9\3\u00b9\3\u00b9\3\u00ba\3\u00ba")
buf.write("\3\u00ba\3\u00bb\3\u00bb\3\u00bb\3\u00bc\3\u00bc\3\u00bc")
buf.write("\3\u00bd\3\u00bd\3\u00bd\3\u00be\3\u00be\3\u00be\3\u00bf")
buf.write("\3\u00bf\3\u00bf\3\u00c0\3\u00c0\3\u00c0\3\u00c1\3\u00c1")
buf.write("\3\u00c1\3\u00c2\3\u00c2\3\u00c2\3\u00c3\3\u00c3\3\u00c3")
buf.write("\3\u00c4\3\u00c4\3\u00c4\3\u00c5\3\u00c5\3\u00c5\3\u00c6")
buf.write("\3\u00c6\3\u00c6\3\u00c7\3\u00c7\3\u00c7\3\u00c8\3\u00c8")
buf.write("\3\u00c8\3\u00c9\3\u00c9\3\u00c9\3\u00ca\3\u00ca\3\u00ca")
buf.write("\3\u00cb\3\u00cb\3\u00cb\3\u00cc\3\u00cc\3\u00cc\3\u00cd")
buf.write("\3\u00cd\3\u00cd\3\u00ce\3\u00ce\3\u00ce\3\u00cf\3\u00cf")
buf.write("\3\u00cf\3\u00d0\3\u00d0\3\u00d0\3\u00d1\3\u00d1\3\u00d1")
buf.write("\3\u00d2\3\u00d2\3\u00d2\3\u00d3\3\u00d3\3\u00d3\3\u00d4")
buf.write("\3\u00d4\3\u00d4\3\u00d5\3\u00d5\3\u00d5\3\u00d6\3\u00d6")
buf.write("\3\u00d6\3\u00d7\3\u00d7\3\u00d7\3\u00d8\3\u00d8\3\u00d8")
buf.write("\3\u00d9\3\u00d9\3\u00d9\3\u00da\3\u00da\3\u00da\3\u00db")
buf.write("\3\u00db\3\u00db\3\u00dc\3\u00dc\3\u00dc\3\u00dd\3\u00dd")
buf.write("\3\u00dd\3\u00de\3\u00de\3\u00de\3\u00df\3\u00df\3\u00df")
buf.write("\3\u00e0\3\u00e0\3\u00e0\3\u00e1\3\u00e1\3\u00e1\3\u00e2")
buf.write("\3\u00e2\3\u00e2\3\u00e3\3\u00e3\3\u00e3\3\u00e4\3\u00e4")
buf.write("\3\u00e4\3\u00e5\3\u00e5\3\u00e5\3\u00e6\3\u00e6\3\u00e6")
buf.write("\3\u00e7\3\u00e7\3\u00e7\3\u00e8\3\u00e8\3\u00e8\3\u00e9")
buf.write("\3\u00e9\3\u00e9\3\u00ea\3\u00ea\3\u00ea\3\u00eb\3\u00eb")
buf.write("\3\u00eb\3\u00ec\3\u00ec\3\u00ec\3\u00ed\3\u00ed\3\u00ed")
buf.write("\3\u00ee\3\u00ee\3\u00ee\3\u00ef\3\u00ef\3\u00ef\3\u00f0")
buf.write("\3\u00f0\3\u00f0\3\u00f1\3\u00f1\3\u00f1\3\u00f2\3\u00f2")
buf.write("\3\u00f2\3\u00f3\3\u00f3\3\u00f3\3\u00f4\3\u00f4\3\u00f4")
buf.write("\3\u00f5\3\u00f5\3\u00f5\3\u00f6\3\u00f6\3\u00f6\3\u00f7")
buf.write("\3\u00f7\3\u00f7\3\u00f8\3\u00f8\3\u00f8\3\u00f9\3\u00f9")
buf.write("\2\2\u00fa\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25")
buf.write("\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+")
buf.write("\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E")
buf.write("$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k")
buf.write("\67m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087")
buf.write("E\u0089F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097")
buf.write("M\u0099N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7")
buf.write("U\u00a9V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7")
buf.write("]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7")
buf.write("e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7")
buf.write("m\u00d9n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7")
buf.write("u\u00e9v\u00ebw\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7")
buf.write("}\u00f9~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101\u0082")
buf.write("\u0103\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b")
buf.write("\u0087\u010d\u0088\u010f\u0089\u0111\u008a\u0113\u008b")
buf.write("\u0115\u008c\u0117\u008d\u0119\u008e\u011b\u008f\u011d")
buf.write("\u0090\u011f\u0091\u0121\u0092\u0123\u0093\u0125\u0094")
buf.write("\u0127\u0095\u0129\u0096\u012b\u0097\u012d\u0098\u012f")
buf.write("\u0099\u0131\u009a\u0133\u009b\u0135\u009c\u0137\u009d")
buf.write("\u0139\u009e\u013b\u009f\u013d\u00a0\u013f\u00a1\u0141")
buf.write("\u00a2\u0143\u00a3\u0145\u00a4\u0147\u00a5\u0149\u00a6")
buf.write("\u014b\u00a7\u014d\u00a8\u014f\u00a9\u0151\u00aa\u0153")
buf.write("\u00ab\u0155\u00ac\u0157\u00ad\u0159\u00ae\u015b\u00af")
buf.write("\u015d\u00b0\u015f\u00b1\u0161\u00b2\u0163\u00b3\u0165")
buf.write("\u00b4\u0167\u00b5\u0169\u00b6\u016b\u00b7\u016d\u00b8")
buf.write("\u016f\u00b9\u0171\u00ba\u0173\u00bb\u0175\u00bc\u0177")
buf.write("\u00bd\u0179\u00be\u017b\u00bf\u017d\u00c0\u017f\u00c1")
buf.write("\u0181\u00c2\u0183\u00c3\u0185\u00c4\u0187\u00c5\u0189")
buf.write("\u00c6\u018b\u00c7\u018d\u00c8\u018f\u00c9\u0191\u00ca")
buf.write("\u0193\u00cb\u0195\u00cc\u0197\u00cd\u0199\u00ce\u019b")
buf.write("\u00cf\u019d\u00d0\u019f\u00d1\u01a1\u00d2\u01a3\u00d3")
buf.write("\u01a5\u00d4\u01a7\u00d5\u01a9\u00d6\u01ab\u00d7\u01ad")
buf.write("\u00d8\u01af\u00d9\u01b1\u00da\u01b3\u00db\u01b5\u00dc")
buf.write("\u01b7\u00dd\u01b9\u00de\u01bb\u00df\u01bd\u00e0\u01bf")
buf.write("\u00e1\u01c1\u00e2\u01c3\u00e3\u01c5\u00e4\u01c7\u00e5")
buf.write("\u01c9\u00e6\u01cb\u00e7\u01cd\u00e8\u01cf\u00e9\u01d1")
buf.write("\u00ea\u01d3\u00eb\u01d5\u00ec\u01d7\u00ed\u01d9\u00ee")
buf.write("\u01db\u00ef\u01dd\u00f0\u01df\u00f1\u01e1\u00f2\u01e3")
buf.write("\u00f3\u01e5\u00f4\u01e7\u00f5\u01e9\u00f6\u01eb\u00f7")
buf.write("\u01ed\u00f8\u01ef\u00f9\u01f1\u00fa\3\2\2\2\u04bb\2\3")
buf.write("\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2")
buf.write("\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write("\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2")
buf.write("\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3")
buf.write("\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2")
buf.write("/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q")
buf.write("\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2")
buf.write("{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083")
buf.write("\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2")
buf.write("\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091")
buf.write("\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2")
buf.write("\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f")
buf.write("\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2")
buf.write("\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad")
buf.write("\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2")
buf.write("\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb")
buf.write("\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2")
buf.write("\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9")
buf.write("\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2")
buf.write("\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7")
buf.write("\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2")
buf.write("\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5")
buf.write("\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2")
buf.write("\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3")
buf.write("\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2")
buf.write("\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101")
buf.write("\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2")
buf.write("\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u010f")
buf.write("\3\2\2\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2")
buf.write("\2\2\u0117\3\2\2\2\2\u0119\3\2\2\2\2\u011b\3\2\2\2\2\u011d")
buf.write("\3\2\2\2\2\u011f\3\2\2\2\2\u0121\3\2\2\2\2\u0123\3\2\2")
buf.write("\2\2\u0125\3\2\2\2\2\u0127\3\2\2\2\2\u0129\3\2\2\2\2\u012b")
buf.write("\3\2\2\2\2\u012d\3\2\2\2\2\u012f\3\2\2\2\2\u0131\3\2\2")
buf.write("\2\2\u0133\3\2\2\2\2\u0135\3\2\2\2\2\u0137\3\2\2\2\2\u0139")
buf.write("\3\2\2\2\2\u013b\3\2\2\2\2\u013d\3\2\2\2\2\u013f\3\2\2")
buf.write("\2\2\u0141\3\2\2\2\2\u0143\3\2\2\2\2\u0145\3\2\2\2\2\u0147")
buf.write("\3\2\2\2\2\u0149\3\2\2\2\2\u014b\3\2\2\2\2\u014d\3\2\2")
buf.write("\2\2\u014f\3\2\2\2\2\u0151\3\2\2\2\2\u0153\3\2\2\2\2\u0155")
buf.write("\3\2\2\2\2\u0157\3\2\2\2\2\u0159\3\2\2\2\2\u015b\3\2\2")
buf.write("\2\2\u015d\3\2\2\2\2\u015f\3\2\2\2\2\u0161\3\2\2\2\2\u0163")
buf.write("\3\2\2\2\2\u0165\3\2\2\2\2\u0167\3\2\2\2\2\u0169\3\2\2")
buf.write("\2\2\u016b\3\2\2\2\2\u016d\3\2\2\2\2\u016f\3\2\2\2\2\u0171")
buf.write("\3\2\2\2\2\u0173\3\2\2\2\2\u0175\3\2\2\2\2\u0177\3\2\2")
buf.write("\2\2\u0179\3\2\2\2\2\u017b\3\2\2\2\2\u017d\3\2\2\2\2\u017f")
buf.write("\3\2\2\2\2\u0181\3\2\2\2\2\u0183\3\2\2\2\2\u0185\3\2\2")
buf.write("\2\2\u0187\3\2\2\2\2\u0189\3\2\2\2\2\u018b\3\2\2\2\2\u018d")
buf.write("\3\2\2\2\2\u018f\3\2\2\2\2\u0191\3\2\2\2\2\u0193\3\2\2")
buf.write("\2\2\u0195\3\2\2\2\2\u0197\3\2\2\2\2\u0199\3\2\2\2\2\u019b")
buf.write("\3\2\2\2\2\u019d\3\2\2\2\2\u019f\3\2\2\2\2\u01a1\3\2\2")
buf.write("\2\2\u01a3\3\2\2\2\2\u01a5\3\2\2\2\2\u01a7\3\2\2\2\2\u01a9")
buf.write("\3\2\2\2\2\u01ab\3\2\2\2\2\u01ad\3\2\2\2\2\u01af\3\2\2")
buf.write("\2\2\u01b1\3\2\2\2\2\u01b3\3\2\2\2\2\u01b5\3\2\2\2\2\u01b7")
buf.write("\3\2\2\2\2\u01b9\3\2\2\2\2\u01bb\3\2\2\2\2\u01bd\3\2\2")
buf.write("\2\2\u01bf\3\2\2\2\2\u01c1\3\2\2\2\2\u01c3\3\2\2\2\2\u01c5")
buf.write("\3\2\2\2\2\u01c7\3\2\2\2\2\u01c9\3\2\2\2\2\u01cb\3\2\2")
buf.write("\2\2\u01cd\3\2\2\2\2\u01cf\3\2\2\2\2\u01d1\3\2\2\2\2\u01d3")
buf.write("\3\2\2\2\2\u01d5\3\2\2\2\2\u01d7\3\2\2\2\2\u01d9\3\2\2")
buf.write("\2\2\u01db\3\2\2\2\2\u01dd\3\2\2\2\2\u01df\3\2\2\2\2\u01e1")
buf.write("\3\2\2\2\2\u01e3\3\2\2\2\2\u01e5\3\2\2\2\2\u01e7\3\2\2")
buf.write("\2\2\u01e9\3\2\2\2\2\u01eb\3\2\2\2\2\u01ed\3\2\2\2\2\u01ef")
buf.write("\3\2\2\2\2\u01f1\3\2\2\2\3\u01f3\3\2\2\2\5\u01f5\3\2\2")
buf.write("\2\7\u01f8\3\2\2\2\t\u01fb\3\2\2\2\13\u01fe\3\2\2\2\r")
buf.write("\u0201\3\2\2\2\17\u0204\3\2\2\2\21\u0207\3\2\2\2\23\u020a")
buf.write("\3\2\2\2\25\u020d\3\2\2\2\27\u0210\3\2\2\2\31\u0213\3")
buf.write("\2\2\2\33\u0216\3\2\2\2\35\u0219\3\2\2\2\37\u021c\3\2")
buf.write("\2\2!\u021f\3\2\2\2#\u0222\3\2\2\2%\u0225\3\2\2\2\'\u0228")
buf.write("\3\2\2\2)\u022b\3\2\2\2+\u022d\3\2\2\2-\u022f\3\2\2\2")
buf.write("/\u0231\3\2\2\2\61\u0233\3\2\2\2\63\u0235\3\2\2\2\65\u0237")
buf.write("\3\2\2\2\67\u023a\3\2\2\29\u023d\3\2\2\2;\u0240\3\2\2")
buf.write("\2=\u0243\3\2\2\2?\u0245\3\2\2\2A\u0248\3\2\2\2C\u024b")
buf.write("\3\2\2\2E\u024e\3\2\2\2G\u0251\3\2\2\2I\u0253\3\2\2\2")
buf.write("K\u0256\3\2\2\2M\u0259\3\2\2\2O\u025c\3\2\2\2Q\u025f\3")
buf.write("\2\2\2S\u0261\3\2\2\2U\u0264\3\2\2\2W\u0267\3\2\2\2Y\u026a")
buf.write("\3\2\2\2[\u026d\3\2\2\2]\u026f\3\2\2\2_\u0272\3\2\2\2")
buf.write("a\u0275\3\2\2\2c\u0278\3\2\2\2e\u027b\3\2\2\2g\u027d\3")
buf.write("\2\2\2i\u0280\3\2\2\2k\u0283\3\2\2\2m\u0286\3\2\2\2o\u0289")
buf.write("\3\2\2\2q\u028b\3\2\2\2s\u028e\3\2\2\2u\u0291\3\2\2\2")
buf.write("w\u0294\3\2\2\2y\u0297\3\2\2\2{\u0299\3\2\2\2}\u029c\3")
buf.write("\2\2\2\177\u029f\3\2\2\2\u0081\u02a2\3\2\2\2\u0083\u02a5")
buf.write("\3\2\2\2\u0085\u02a7\3\2\2\2\u0087\u02aa\3\2\2\2\u0089")
buf.write("\u02ad\3\2\2\2\u008b\u02b0\3\2\2\2\u008d\u02b3\3\2\2\2")
buf.write("\u008f\u02b5\3\2\2\2\u0091\u02b8\3\2\2\2\u0093\u02bb\3")
buf.write("\2\2\2\u0095\u02be\3\2\2\2\u0097\u02c1\3\2\2\2\u0099\u02c3")
buf.write("\3\2\2\2\u009b\u02c6\3\2\2\2\u009d\u02c9\3\2\2\2\u009f")
buf.write("\u02cc\3\2\2\2\u00a1\u02cf\3\2\2\2\u00a3\u02d1\3\2\2\2")
buf.write("\u00a5\u02d4\3\2\2\2\u00a7\u02d7\3\2\2\2\u00a9\u02da\3")
buf.write("\2\2\2\u00ab\u02dd\3\2\2\2\u00ad\u02df\3\2\2\2\u00af\u02e2")
buf.write("\3\2\2\2\u00b1\u02e5\3\2\2\2\u00b3\u02e8\3\2\2\2\u00b5")
buf.write("\u02eb\3\2\2\2\u00b7\u02ed\3\2\2\2\u00b9\u02f0\3\2\2\2")
buf.write("\u00bb\u02f3\3\2\2\2\u00bd\u02f6\3\2\2\2\u00bf\u02f9\3")
buf.write("\2\2\2\u00c1\u02fb\3\2\2\2\u00c3\u02fe\3\2\2\2\u00c5\u0301")
buf.write("\3\2\2\2\u00c7\u0304\3\2\2\2\u00c9\u0307\3\2\2\2\u00cb")
buf.write("\u0309\3\2\2\2\u00cd\u030c\3\2\2\2\u00cf\u030f\3\2\2\2")
buf.write("\u00d1\u0312\3\2\2\2\u00d3\u0315\3\2\2\2\u00d5\u0317\3")
buf.write("\2\2\2\u00d7\u031a\3\2\2\2\u00d9\u031d\3\2\2\2\u00db\u0320")
buf.write("\3\2\2\2\u00dd\u0323\3\2\2\2\u00df\u0325\3\2\2\2\u00e1")
buf.write("\u0328\3\2\2\2\u00e3\u032b\3\2\2\2\u00e5\u032e\3\2\2\2")
buf.write("\u00e7\u0331\3\2\2\2\u00e9\u0333\3\2\2\2\u00eb\u0335\3")
buf.write("\2\2\2\u00ed\u0337\3\2\2\2\u00ef\u0339\3\2\2\2\u00f1\u033b")
buf.write("\3\2\2\2\u00f3\u033d\3\2\2\2\u00f5\u0340\3\2\2\2\u00f7")
buf.write("\u0343\3\2\2\2\u00f9\u0346\3\2\2\2\u00fb\u0349\3\2\2\2")
buf.write("\u00fd\u034c\3\2\2\2\u00ff\u034f\3\2\2\2\u0101\u0352\3")
buf.write("\2\2\2\u0103\u0355\3\2\2\2\u0105\u0358\3\2\2\2\u0107\u035b")
buf.write("\3\2\2\2\u0109\u035e\3\2\2\2\u010b\u0361\3\2\2\2\u010d")
buf.write("\u0364\3\2\2\2\u010f\u0367\3\2\2\2\u0111\u036a\3\2\2\2")
buf.write("\u0113\u036d\3\2\2\2\u0115\u0370\3\2\2\2\u0117\u0373\3")
buf.write("\2\2\2\u0119\u0376\3\2\2\2\u011b\u0379\3\2\2\2\u011d\u037c")
buf.write("\3\2\2\2\u011f\u037f\3\2\2\2\u0121\u0382\3\2\2\2\u0123")
buf.write("\u0385\3\2\2\2\u0125\u0388\3\2\2\2\u0127\u038b\3\2\2\2")
buf.write("\u0129\u038e\3\2\2\2\u012b\u0391\3\2\2\2\u012d\u0394\3")
buf.write("\2\2\2\u012f\u0397\3\2\2\2\u0131\u039a\3\2\2\2\u0133\u039d")
buf.write("\3\2\2\2\u0135\u03a0\3\2\2\2\u0137\u03a3\3\2\2\2\u0139")
buf.write("\u03a6\3\2\2\2\u013b\u03a9\3\2\2\2\u013d\u03ac\3\2\2\2")
buf.write("\u013f\u03af\3\2\2\2\u0141\u03b2\3\2\2\2\u0143\u03b5\3")
buf.write("\2\2\2\u0145\u03b8\3\2\2\2\u0147\u03bb\3\2\2\2\u0149\u03be")
buf.write("\3\2\2\2\u014b\u03c1\3\2\2\2\u014d\u03c4\3\2\2\2\u014f")
buf.write("\u03c7\3\2\2\2\u0151\u03ca\3\2\2\2\u0153\u03cd\3\2\2\2")
buf.write("\u0155\u03d0\3\2\2\2\u0157\u03d3\3\2\2\2\u0159\u03d6\3")
buf.write("\2\2\2\u015b\u03d9\3\2\2\2\u015d\u03dc\3\2\2\2\u015f\u03df")
buf.write("\3\2\2\2\u0161\u03e2\3\2\2\2\u0163\u03e5\3\2\2\2\u0165")
buf.write("\u03e8\3\2\2\2\u0167\u03eb\3\2\2\2\u0169\u03ee\3\2\2\2")
buf.write("\u016b\u03f1\3\2\2\2\u016d\u03f4\3\2\2\2\u016f\u03f7\3")
buf.write("\2\2\2\u0171\u03fa\3\2\2\2\u0173\u03fd\3\2\2\2\u0175\u0400")
buf.write("\3\2\2\2\u0177\u0403\3\2\2\2\u0179\u0406\3\2\2\2\u017b")
buf.write("\u0409\3\2\2\2\u017d\u040c\3\2\2\2\u017f\u040f\3\2\2\2")
buf.write("\u0181\u0412\3\2\2\2\u0183\u0415\3\2\2\2\u0185\u0418\3")
buf.write("\2\2\2\u0187\u041b\3\2\2\2\u0189\u041e\3\2\2\2\u018b\u0421")
buf.write("\3\2\2\2\u018d\u0424\3\2\2\2\u018f\u0427\3\2\2\2\u0191")
buf.write("\u042a\3\2\2\2\u0193\u042d\3\2\2\2\u0195\u0430\3\2\2\2")
buf.write("\u0197\u0433\3\2\2\2\u0199\u0436\3\2\2\2\u019b\u0439\3")
buf.write("\2\2\2\u019d\u043c\3\2\2\2\u019f\u043f\3\2\2\2\u01a1\u0442")
buf.write("\3\2\2\2\u01a3\u0445\3\2\2\2\u01a5\u0448\3\2\2\2\u01a7")
buf.write("\u044b\3\2\2\2\u01a9\u044e\3\2\2\2\u01ab\u0451\3\2\2\2")
buf.write("\u01ad\u0454\3\2\2\2\u01af\u0457\3\2\2\2\u01b1\u045a\3")
buf.write("\2\2\2\u01b3\u045d\3\2\2\2\u01b5\u0460\3\2\2\2\u01b7\u0463")
buf.write("\3\2\2\2\u01b9\u0466\3\2\2\2\u01bb\u0469\3\2\2\2\u01bd")
buf.write("\u046c\3\2\2\2\u01bf\u046f\3\2\2\2\u01c1\u0472\3\2\2\2")
buf.write("\u01c3\u0475\3\2\2\2\u01c5\u0478\3\2\2\2\u01c7\u047b\3")
buf.write("\2\2\2\u01c9\u047e\3\2\2\2\u01cb\u0481\3\2\2\2\u01cd\u0484")
buf.write("\3\2\2\2\u01cf\u0487\3\2\2\2\u01d1\u048a\3\2\2\2\u01d3")
buf.write("\u048d\3\2\2\2\u01d5\u0490\3\2\2\2\u01d7\u0493\3\2\2\2")
buf.write("\u01d9\u0496\3\2\2\2\u01db\u0499\3\2\2\2\u01dd\u049c\3")
buf.write("\2\2\2\u01df\u049f\3\2\2\2\u01e1\u04a2\3\2\2\2\u01e3\u04a5")
buf.write("\3\2\2\2\u01e5\u04a8\3\2\2\2\u01e7\u04ab\3\2\2\2\u01e9")
buf.write("\u04ae\3\2\2\2\u01eb\u04b1\3\2\2\2\u01ed\u04b4\3\2\2\2")
buf.write("\u01ef\u04b7\3\2\2\2\u01f1\u04ba\3\2\2\2\u01f3\u01f4\7")
buf.write("\f\2\2\u01f4\4\3\2\2\2\u01f5\u01f6\7\u0b97\2\2\u01f6\u01f7")
buf.write("\7\u0bcf\2\2\u01f7\6\3\2\2\2\u01f8\u01f9\7\u0b9c\2\2\u01f9")
buf.write("\u01fa\7\u0bcf\2\2\u01fa\b\3\2\2\2\u01fb\u01fc\7\u0ba1")
buf.write("\2\2\u01fc\u01fd\7\u0bcf\2\2\u01fd\n\3\2\2\2\u01fe\u01ff")
buf.write("\7\u0ba6\2\2\u01ff\u0200\7\u0bcf\2\2\u0200\f\3\2\2\2\u0201")
buf.write("\u0202\7\u0bac\2\2\u0202\u0203\7\u0bcf\2\2\u0203\16\3")
buf.write("\2\2\2\u0204\u0205\7\u0bb3\2\2\u0205\u0206\7\u0bcf\2\2")
buf.write("\u0206\20\3\2\2\2\u0207\u0208\7\u0ba0\2\2\u0208\u0209")
buf.write("\7\u0bcf\2\2\u0209\22\3\2\2\2\u020a\u020b\7\u0b9b\2\2")
buf.write("\u020b\u020c\7\u0bcf\2\2\u020c\24\3\2\2\2\u020d\u020e")
buf.write("\7\u0ba5\2\2\u020e\u020f\7\u0bcf\2\2\u020f\26\3\2\2\2")
buf.write("\u0210\u0211\7\u0baa\2\2\u0211\u0212\7\u0bcf\2\2\u0212")
buf.write("\30\3\2\2\2\u0213\u0214\7\u0bb0\2\2\u0214\u0215\7\u0bcf")
buf.write("\2\2\u0215\32\3\2\2\2\u0216\u0217\7\u0bab\2\2\u0217\u0218")
buf.write("\7\u0bcf\2\2\u0218\34\3\2\2\2\u0219\u021a\7\u0bb1\2\2")
buf.write("\u021a\u021b\7\u0bcf\2\2\u021b\36\3\2\2\2\u021c\u021d")
buf.write("\7\u0bb2\2\2\u021d\u021e\7\u0bcf\2\2\u021e \3\2\2\2\u021f")
buf.write("\u0220\7\u0bb4\2\2\u0220\u0221\7\u0bcf\2\2\u0221\"\3\2")
buf.write("\2\2\u0222\u0223\7\u0bb7\2\2\u0223\u0224\7\u0bcf\2\2\u0224")
buf.write("$\3\2\2\2\u0225\u0226\7\u0bb6\2\2\u0226\u0227\7\u0bcf")
buf.write("\2\2\u0227&\3\2\2\2\u0228\u0229\7\u0bb5\2\2\u0229\u022a")
buf.write("\7\u0bcf\2\2\u022a(\3\2\2\2\u022b\u022c\7\u0b87\2\2\u022c")
buf.write("*\3\2\2\2\u022d\u022e\7\u0b89\2\2\u022e,\3\2\2\2\u022f")
buf.write("\u0230\7\u0b8b\2\2\u0230.\3\2\2\2\u0231\u0232\7\u0b90")
buf.write("\2\2\u0232\60\3\2\2\2\u0233\u0234\7\u0b94\2\2\u0234\62")
buf.write("\3\2\2\2\u0235\u0236\7\u0b97\2\2\u0236\64\3\2\2\2\u0237")
buf.write("\u0238\7\u0b97\2\2\u0238\u0239\7\u0bc1\2\2\u0239\66\3")
buf.write("\2\2\2\u023a\u023b\7\u0b97\2\2\u023b\u023c\7\u0bc3\2\2")
buf.write("\u023c8\3\2\2\2\u023d\u023e\7\u0b97\2\2\u023e\u023f\7")
buf.write("\u0bc8\2\2\u023f:\3\2\2\2\u0240\u0241\7\u0b97\2\2\u0241")
buf.write("\u0242\7\u0bcc\2\2\u0242<\3\2\2\2\u0243\u0244\7\u0b9b")
buf.write("\2\2\u0244>\3\2\2\2\u0245\u0246\7\u0b9b\2\2\u0246\u0247")
buf.write("\7\u0bc1\2\2\u0247@\3\2\2\2\u0248\u0249\7\u0b9b\2\2\u0249")
buf.write("\u024a\7\u0bc3\2\2\u024aB\3\2\2\2\u024b\u024c\7\u0b9b")
buf.write("\2\2\u024c\u024d\7\u0bc8\2\2\u024dD\3\2\2\2\u024e\u024f")
buf.write("\7\u0b9b\2\2\u024f\u0250\7\u0bcc\2\2\u0250F\3\2\2\2\u0251")
buf.write("\u0252\7\u0b9c\2\2\u0252H\3\2\2\2\u0253\u0254\7\u0b9c")
buf.write("\2\2\u0254\u0255\7\u0bc1\2\2\u0255J\3\2\2\2\u0256\u0257")
buf.write("\7\u0b9c\2\2\u0257\u0258\7\u0bc3\2\2\u0258L\3\2\2\2\u0259")
buf.write("\u025a\7\u0b9c\2\2\u025a\u025b\7\u0bc8\2\2\u025bN\3\2")
buf.write("\2\2\u025c\u025d\7\u0b9c\2\2\u025d\u025e\7\u0bcc\2\2\u025e")
buf.write("P\3\2\2\2\u025f\u0260\7\u0ba0\2\2\u0260R\3\2\2\2\u0261")
buf.write("\u0262\7\u0ba0\2\2\u0262\u0263\7\u0bc1\2\2\u0263T\3\2")
buf.write("\2\2\u0264\u0265\7\u0ba0\2\2\u0265\u0266\7\u0bc3\2\2\u0266")
buf.write("V\3\2\2\2\u0267\u0268\7\u0ba0\2\2\u0268\u0269\7\u0bc8")
buf.write("\2\2\u0269X\3\2\2\2\u026a\u026b\7\u0ba0\2\2\u026b\u026c")
buf.write("\7\u0bcc\2\2\u026cZ\3\2\2\2\u026d\u026e\7\u0ba1\2\2\u026e")
buf.write("\\\3\2\2\2\u026f\u0270\7\u0ba1\2\2\u0270\u0271\7\u0bc1")
buf.write("\2\2\u0271^\3\2\2\2\u0272\u0273\7\u0ba1\2\2\u0273\u0274")
buf.write("\7\u0bc3\2\2\u0274`\3\2\2\2\u0275\u0276\7\u0ba1\2\2\u0276")
buf.write("\u0277\7\u0bc8\2\2\u0277b\3\2\2\2\u0278\u0279\7\u0ba1")
buf.write("\2\2\u0279\u027a\7\u0bcc\2\2\u027ad\3\2\2\2\u027b\u027c")
buf.write("\7\u0ba5\2\2\u027cf\3\2\2\2\u027d\u027e\7\u0ba5\2\2\u027e")
buf.write("\u027f\7\u0bc1\2\2\u027fh\3\2\2\2\u0280\u0281\7\u0ba5")
buf.write("\2\2\u0281\u0282\7\u0bc3\2\2\u0282j\3\2\2\2\u0283\u0284")
buf.write("\7\u0ba5\2\2\u0284\u0285\7\u0bc8\2\2\u0285l\3\2\2\2\u0286")
buf.write("\u0287\7\u0ba5\2\2\u0287\u0288\7\u0bcc\2\2\u0288n\3\2")
buf.write("\2\2\u0289\u028a\7\u0ba6\2\2\u028ap\3\2\2\2\u028b\u028c")
buf.write("\7\u0ba6\2\2\u028c\u028d\7\u0bc1\2\2\u028dr\3\2\2\2\u028e")
buf.write("\u028f\7\u0ba6\2\2\u028f\u0290\7\u0bc3\2\2\u0290t\3\2")
buf.write("\2\2\u0291\u0292\7\u0ba6\2\2\u0292\u0293\7\u0bc8\2\2\u0293")
buf.write("v\3\2\2\2\u0294\u0295\7\u0ba6\2\2\u0295\u0296\7\u0bcc")
buf.write("\2\2\u0296x\3\2\2\2\u0297\u0298\7\u0baa\2\2\u0298z\3\2")
buf.write("\2\2\u0299\u029a\7\u0baa\2\2\u029a\u029b\7\u0bc1\2\2\u029b")
buf.write("|\3\2\2\2\u029c\u029d\7\u0baa\2\2\u029d\u029e\7\u0bc3")
buf.write("\2\2\u029e~\3\2\2\2\u029f\u02a0\7\u0baa\2\2\u02a0\u02a1")
buf.write("\7\u0bc8\2\2\u02a1\u0080\3\2\2\2\u02a2\u02a3\7\u0baa\2")
buf.write("\2\u02a3\u02a4\7\u0bcc\2\2\u02a4\u0082\3\2\2\2\u02a5\u02a6")
buf.write("\7\u0bac\2\2\u02a6\u0084\3\2\2\2\u02a7\u02a8\7\u0bac\2")
buf.write("\2\u02a8\u02a9\7\u0bc1\2\2\u02a9\u0086\3\2\2\2\u02aa\u02ab")
buf.write("\7\u0bac\2\2\u02ab\u02ac\7\u0bc3\2\2\u02ac\u0088\3\2\2")
buf.write("\2\u02ad\u02ae\7\u0bac\2\2\u02ae\u02af\7\u0bc8\2\2\u02af")
buf.write("\u008a\3\2\2\2\u02b0\u02b1\7\u0bac\2\2\u02b1\u02b2\7\u0bcc")
buf.write("\2\2\u02b2\u008c\3\2\2\2\u02b3\u02b4\7\u0bb0\2\2\u02b4")
buf.write("\u008e\3\2\2\2\u02b5\u02b6\7\u0bb0\2\2\u02b6\u02b7\7\u0bc1")
buf.write("\2\2\u02b7\u0090\3\2\2\2\u02b8\u02b9\7\u0bb0\2\2\u02b9")
buf.write("\u02ba\7\u0bc3\2\2\u02ba\u0092\3\2\2\2\u02bb\u02bc\7\u0bb0")
buf.write("\2\2\u02bc\u02bd\7\u0bc8\2\2\u02bd\u0094\3\2\2\2\u02be")
buf.write("\u02bf\7\u0bb0\2\2\u02bf\u02c0\7\u0bcc\2\2\u02c0\u0096")
buf.write("\3\2\2\2\u02c1\u02c2\7\u0bb1\2\2\u02c2\u0098\3\2\2\2\u02c3")
buf.write("\u02c4\7\u0bb1\2\2\u02c4\u02c5\7\u0bc1\2\2\u02c5\u009a")
buf.write("\3\2\2\2\u02c6\u02c7\7\u0bb1\2\2\u02c7\u02c8\7\u0bc3\2")
buf.write("\2\u02c8\u009c\3\2\2\2\u02c9\u02ca\7\u0bb1\2\2\u02ca\u02cb")
buf.write("\7\u0bc8\2\2\u02cb\u009e\3\2\2\2\u02cc\u02cd\7\u0bb1\2")
buf.write("\2\u02cd\u02ce\7\u0bcc\2\2\u02ce\u00a0\3\2\2\2\u02cf\u02d0")
buf.write("\7\u0bb2\2\2\u02d0\u00a2\3\2\2\2\u02d1\u02d2\7\u0bb2\2")
buf.write("\2\u02d2\u02d3\7\u0bc1\2\2\u02d3\u00a4\3\2\2\2\u02d4\u02d5")
buf.write("\7\u0bb2\2\2\u02d5\u02d6\7\u0bc3\2\2\u02d6\u00a6\3\2\2")
buf.write("\2\u02d7\u02d8\7\u0bb2\2\2\u02d8\u02d9\7\u0bc8\2\2\u02d9")
buf.write("\u00a8\3\2\2\2\u02da\u02db\7\u0bb2\2\2\u02db\u02dc\7\u0bcc")
buf.write("\2\2\u02dc\u00aa\3\2\2\2\u02dd\u02de\7\u0bb4\2\2\u02de")
buf.write("\u00ac\3\2\2\2\u02df\u02e0\7\u0bb4\2\2\u02e0\u02e1\7\u0bc1")
buf.write("\2\2\u02e1\u00ae\3\2\2\2\u02e2\u02e3\7\u0bb4\2\2\u02e3")
buf.write("\u02e4\7\u0bc3\2\2\u02e4\u00b0\3\2\2\2\u02e5\u02e6\7\u0bb4")
buf.write("\2\2\u02e6\u02e7\7\u0bc8\2\2\u02e7\u00b2\3\2\2\2\u02e8")
buf.write("\u02e9\7\u0bb4\2\2\u02e9\u02ea\7\u0bcc\2\2\u02ea\u00b4")
buf.write("\3\2\2\2\u02eb\u02ec\7\u0bb7\2\2\u02ec\u00b6\3\2\2\2\u02ed")
buf.write("\u02ee\7\u0bb7\2\2\u02ee\u02ef\7\u0bc1\2\2\u02ef\u00b8")
buf.write("\3\2\2\2\u02f0\u02f1\7\u0bb7\2\2\u02f1\u02f2\7\u0bc3\2")
buf.write("\2\u02f2\u00ba\3\2\2\2\u02f3\u02f4\7\u0bb7\2\2\u02f4\u02f5")
buf.write("\7\u0bc8\2\2\u02f5\u00bc\3\2\2\2\u02f6\u02f7\7\u0bb7\2")
buf.write("\2\u02f7\u02f8\7\u0bcc\2\2\u02f8\u00be\3\2\2\2\u02f9\u02fa")
buf.write("\7\u0bb6\2\2\u02fa\u00c0\3\2\2\2\u02fb\u02fc\7\u0bb6\2")
buf.write("\2\u02fc\u02fd\7\u0bc1\2\2\u02fd\u00c2\3\2\2\2\u02fe\u02ff")
buf.write("\7\u0bb6\2\2\u02ff\u0300\7\u0bc3\2\2\u0300\u00c4\3\2\2")
buf.write("\2\u0301\u0302\7\u0bb6\2\2\u0302\u0303\7\u0bc8\2\2\u0303")
buf.write("\u00c6\3\2\2\2\u0304\u0305\7\u0bb6\2\2\u0305\u0306\7\u0bcc")
buf.write("\2\2\u0306\u00c8\3\2\2\2\u0307\u0308\7\u0bb5\2\2\u0308")
buf.write("\u00ca\3\2\2\2\u0309\u030a\7\u0bb5\2\2\u030a\u030b\7\u0bc1")
buf.write("\2\2\u030b\u00cc\3\2\2\2\u030c\u030d\7\u0bb5\2\2\u030d")
buf.write("\u030e\7\u0bc3\2\2\u030e\u00ce\3\2\2\2\u030f\u0310\7\u0bb5")
buf.write("\2\2\u0310\u0311\7\u0bc8\2\2\u0311\u00d0\3\2\2\2\u0312")
buf.write("\u0313\7\u0bb5\2\2\u0313\u0314\7\u0bcc\2\2\u0314\u00d2")
buf.write("\3\2\2\2\u0315\u0316\7\u0bb3\2\2\u0316\u00d4\3\2\2\2\u0317")
buf.write("\u0318\7\u0bb3\2\2\u0318\u0319\7\u0bc1\2\2\u0319\u00d6")
buf.write("\3\2\2\2\u031a\u031b\7\u0bb3\2\2\u031b\u031c\7\u0bc3\2")
buf.write("\2\u031c\u00d8\3\2\2\2\u031d\u031e\7\u0bb3\2\2\u031e\u031f")
buf.write("\7\u0bc8\2\2\u031f\u00da\3\2\2\2\u0320\u0321\7\u0bb3\2")
buf.write("\2\u0321\u0322\7\u0bcc\2\2\u0322\u00dc\3\2\2\2\u0323\u0324")
buf.write("\7\u0bab\2\2\u0324\u00de\3\2\2\2\u0325\u0326\7\u0bab\2")
buf.write("\2\u0326\u0327\7\u0bc1\2\2\u0327\u00e0\3\2\2\2\u0328\u0329")
buf.write("\7\u0bab\2\2\u0329\u032a\7\u0bc3\2\2\u032a\u00e2\3\2\2")
buf.write("\2\u032b\u032c\7\u0bab\2\2\u032c\u032d\7\u0bc8\2\2\u032d")
buf.write("\u00e4\3\2\2\2\u032e\u032f\7\u0bab\2\2\u032f\u0330\7\u0bcc")
buf.write("\2\2\u0330\u00e6\3\2\2\2\u0331\u0332\7\u0b88\2\2\u0332")
buf.write("\u00e8\3\2\2\2\u0333\u0334\7\u0b8a\2\2\u0334\u00ea\3\2")
buf.write("\2\2\u0335\u0336\7\u0b8c\2\2\u0336\u00ec\3\2\2\2\u0337")
buf.write("\u0338\7\u0b91\2\2\u0338\u00ee\3\2\2\2\u0339\u033a\7\u0b92")
buf.write("\2\2\u033a\u00f0\3\2\2\2\u033b\u033c\7\u0b95\2\2\u033c")
buf.write("\u00f2\3\2\2\2\u033d\u033e\7\u0b94\2\2\u033e\u033f\7\u0bb5")
buf.write("\2\2\u033f\u00f4\3\2\2\2\u0340\u0341\7\u0b97\2\2\u0341")
buf.write("\u0342\7\u0bc0\2\2\u0342\u00f6\3\2\2\2\u0343\u0344\7\u0b97")
buf.write("\2\2\u0344\u0345\7\u0bc2\2\2\u0345\u00f8\3\2\2\2\u0346")
buf.write("\u0347\7\u0b97\2\2\u0347\u0348\7\u0bc4\2\2\u0348\u00fa")
buf.write("\3\2\2\2\u0349\u034a\7\u0b97\2\2\u034a\u034b\7\u0bc9\2")
buf.write("\2\u034b\u00fc\3\2\2\2\u034c\u034d\7\u0b97\2\2\u034d\u034e")
buf.write("\7\u0bca\2\2\u034e\u00fe\3\2\2\2\u034f\u0350\7\u0b97\2")
buf.write("\2\u0350\u0351\7\u0bcd\2\2\u0351\u0100\3\2\2\2\u0352\u0353")
buf.write("\7\u0b97\2\2\u0353\u0354\7\u0bce\2\2\u0354\u0102\3\2\2")
buf.write("\2\u0355\u0356\7\u0b9b\2\2\u0356\u0357\7\u0bc0\2\2\u0357")
buf.write("\u0104\3\2\2\2\u0358\u0359\7\u0b9b\2\2\u0359\u035a\7\u0bc2")
buf.write("\2\2\u035a\u0106\3\2\2\2\u035b\u035c\7\u0b9b\2\2\u035c")
buf.write("\u035d\7\u0bc4\2\2\u035d\u0108\3\2\2\2\u035e\u035f\7\u0b9b")
buf.write("\2\2\u035f\u0360\7\u0bc9\2\2\u0360\u010a\3\2\2\2\u0361")
buf.write("\u0362\7\u0b9b\2\2\u0362\u0363\7\u0bca\2\2\u0363\u010c")
buf.write("\3\2\2\2\u0364\u0365\7\u0b9b\2\2\u0365\u0366\7\u0bcd\2")
buf.write("\2\u0366\u010e\3\2\2\2\u0367\u0368\7\u0b9b\2\2\u0368\u0369")
buf.write("\7\u0bce\2\2\u0369\u0110\3\2\2\2\u036a\u036b\7\u0b9c\2")
buf.write("\2\u036b\u036c\7\u0bc0\2\2\u036c\u0112\3\2\2\2\u036d\u036e")
buf.write("\7\u0b9c\2\2\u036e\u036f\7\u0bc2\2\2\u036f\u0114\3\2\2")
buf.write("\2\u0370\u0371\7\u0b9c\2\2\u0371\u0372\7\u0bc4\2\2\u0372")
buf.write("\u0116\3\2\2\2\u0373\u0374\7\u0b9c\2\2\u0374\u0375\7\u0bc9")
buf.write("\2\2\u0375\u0118\3\2\2\2\u0376\u0377\7\u0b9c\2\2\u0377")
buf.write("\u0378\7\u0bca\2\2\u0378\u011a\3\2\2\2\u0379\u037a\7\u0b9c")
buf.write("\2\2\u037a\u037b\7\u0bcd\2\2\u037b\u011c\3\2\2\2\u037c")
buf.write("\u037d\7\u0b9c\2\2\u037d\u037e\7\u0bce\2\2\u037e\u011e")
buf.write("\3\2\2\2\u037f\u0380\7\u0ba0\2\2\u0380\u0381\7\u0bc0\2")
buf.write("\2\u0381\u0120\3\2\2\2\u0382\u0383\7\u0ba0\2\2\u0383\u0384")
buf.write("\7\u0bc2\2\2\u0384\u0122\3\2\2\2\u0385\u0386\7\u0ba0\2")
buf.write("\2\u0386\u0387\7\u0bc4\2\2\u0387\u0124\3\2\2\2\u0388\u0389")
buf.write("\7\u0ba0\2\2\u0389\u038a\7\u0bc9\2\2\u038a\u0126\3\2\2")
buf.write("\2\u038b\u038c\7\u0ba0\2\2\u038c\u038d\7\u0bca\2\2\u038d")
buf.write("\u0128\3\2\2\2\u038e\u038f\7\u0ba0\2\2\u038f\u0390\7\u0bcd")
buf.write("\2\2\u0390\u012a\3\2\2\2\u0391\u0392\7\u0ba0\2\2\u0392")
buf.write("\u0393\7\u0bce\2\2\u0393\u012c\3\2\2\2\u0394\u0395\7\u0ba1")
buf.write("\2\2\u0395\u0396\7\u0bc0\2\2\u0396\u012e\3\2\2\2\u0397")
buf.write("\u0398\7\u0ba1\2\2\u0398\u0399\7\u0bc2\2\2\u0399\u0130")
buf.write("\3\2\2\2\u039a\u039b\7\u0ba1\2\2\u039b\u039c\7\u0bc4\2")
buf.write("\2\u039c\u0132\3\2\2\2\u039d\u039e\7\u0ba1\2\2\u039e\u039f")
buf.write("\7\u0bc9\2\2\u039f\u0134\3\2\2\2\u03a0\u03a1\7\u0ba1\2")
buf.write("\2\u03a1\u03a2\7\u0bca\2\2\u03a2\u0136\3\2\2\2\u03a3\u03a4")
buf.write("\7\u0ba1\2\2\u03a4\u03a5\7\u0bcd\2\2\u03a5\u0138\3\2\2")
buf.write("\2\u03a6\u03a7\7\u0ba1\2\2\u03a7\u03a8\7\u0bce\2\2\u03a8")
buf.write("\u013a\3\2\2\2\u03a9\u03aa\7\u0ba5\2\2\u03aa\u03ab\7\u0bc0")
buf.write("\2\2\u03ab\u013c\3\2\2\2\u03ac\u03ad\7\u0ba5\2\2\u03ad")
buf.write("\u03ae\7\u0bc2\2\2\u03ae\u013e\3\2\2\2\u03af\u03b0\7\u0ba5")
buf.write("\2\2\u03b0\u03b1\7\u0bc4\2\2\u03b1\u0140\3\2\2\2\u03b2")
buf.write("\u03b3\7\u0ba5\2\2\u03b3\u03b4\7\u0bc9\2\2\u03b4\u0142")
buf.write("\3\2\2\2\u03b5\u03b6\7\u0ba5\2\2\u03b6\u03b7\7\u0bca\2")
buf.write("\2\u03b7\u0144\3\2\2\2\u03b8\u03b9\7\u0ba5\2\2\u03b9\u03ba")
buf.write("\7\u0bcd\2\2\u03ba\u0146\3\2\2\2\u03bb\u03bc\7\u0ba5\2")
buf.write("\2\u03bc\u03bd\7\u0bce\2\2\u03bd\u0148\3\2\2\2\u03be\u03bf")
buf.write("\7\u0ba6\2\2\u03bf\u03c0\7\u0bc0\2\2\u03c0\u014a\3\2\2")
buf.write("\2\u03c1\u03c2\7\u0ba6\2\2\u03c2\u03c3\7\u0bc2\2\2\u03c3")
buf.write("\u014c\3\2\2\2\u03c4\u03c5\7\u0ba6\2\2\u03c5\u03c6\7\u0bc4")
buf.write("\2\2\u03c6\u014e\3\2\2\2\u03c7\u03c8\7\u0ba6\2\2\u03c8")
buf.write("\u03c9\7\u0bc9\2\2\u03c9\u0150\3\2\2\2\u03ca\u03cb\7\u0ba6")
buf.write("\2\2\u03cb\u03cc\7\u0bca\2\2\u03cc\u0152\3\2\2\2\u03cd")
buf.write("\u03ce\7\u0ba6\2\2\u03ce\u03cf\7\u0bcd\2\2\u03cf\u0154")
buf.write("\3\2\2\2\u03d0\u03d1\7\u0ba6\2\2\u03d1\u03d2\7\u0bce\2")
buf.write("\2\u03d2\u0156\3\2\2\2\u03d3\u03d4\7\u0baa\2\2\u03d4\u03d5")
buf.write("\7\u0bc0\2\2\u03d5\u0158\3\2\2\2\u03d6\u03d7\7\u0baa\2")
buf.write("\2\u03d7\u03d8\7\u0bc2\2\2\u03d8\u015a\3\2\2\2\u03d9\u03da")
buf.write("\7\u0baa\2\2\u03da\u03db\7\u0bc4\2\2\u03db\u015c\3\2\2")
buf.write("\2\u03dc\u03dd\7\u0baa\2\2\u03dd\u03de\7\u0bc9\2\2\u03de")
buf.write("\u015e\3\2\2\2\u03df\u03e0\7\u0baa\2\2\u03e0\u03e1\7\u0bca")
buf.write("\2\2\u03e1\u0160\3\2\2\2\u03e2\u03e3\7\u0baa\2\2\u03e3")
buf.write("\u03e4\7\u0bcd\2\2\u03e4\u0162\3\2\2\2\u03e5\u03e6\7\u0baa")
buf.write("\2\2\u03e6\u03e7\7\u0bce\2\2\u03e7\u0164\3\2\2\2\u03e8")
buf.write("\u03e9\7\u0bac\2\2\u03e9\u03ea\7\u0bc0\2\2\u03ea\u0166")
buf.write("\3\2\2\2\u03eb\u03ec\7\u0bac\2\2\u03ec\u03ed\7\u0bc2\2")
buf.write("\2\u03ed\u0168\3\2\2\2\u03ee\u03ef\7\u0bac\2\2\u03ef\u03f0")
buf.write("\7\u0bc4\2\2\u03f0\u016a\3\2\2\2\u03f1\u03f2\7\u0bac\2")
buf.write("\2\u03f2\u03f3\7\u0bc9\2\2\u03f3\u016c\3\2\2\2\u03f4\u03f5")
buf.write("\7\u0bac\2\2\u03f5\u03f6\7\u0bca\2\2\u03f6\u016e\3\2\2")
buf.write("\2\u03f7\u03f8\7\u0bac\2\2\u03f8\u03f9\7\u0bcd\2\2\u03f9")
buf.write("\u0170\3\2\2\2\u03fa\u03fb\7\u0bac\2\2\u03fb\u03fc\7\u0bce")
buf.write("\2\2\u03fc\u0172\3\2\2\2\u03fd\u03fe\7\u0bb0\2\2\u03fe")
buf.write("\u03ff\7\u0bc0\2\2\u03ff\u0174\3\2\2\2\u0400\u0401\7\u0bb0")
buf.write("\2\2\u0401\u0402\7\u0bc2\2\2\u0402\u0176\3\2\2\2\u0403")
buf.write("\u0404\7\u0bb0\2\2\u0404\u0405\7\u0bc4\2\2\u0405\u0178")
buf.write("\3\2\2\2\u0406\u0407\7\u0bb0\2\2\u0407\u0408\7\u0bc9\2")
buf.write("\2\u0408\u017a\3\2\2\2\u0409\u040a\7\u0bb0\2\2\u040a\u040b")
buf.write("\7\u0bca\2\2\u040b\u017c\3\2\2\2\u040c\u040d\7\u0bb0\2")
buf.write("\2\u040d\u040e\7\u0bcd\2\2\u040e\u017e\3\2\2\2\u040f\u0410")
buf.write("\7\u0bb0\2\2\u0410\u0411\7\u0bce\2\2\u0411\u0180\3\2\2")
buf.write("\2\u0412\u0413\7\u0bb1\2\2\u0413\u0414\7\u0bc0\2\2\u0414")
buf.write("\u0182\3\2\2\2\u0415\u0416\7\u0bb1\2\2\u0416\u0417\7\u0bc2")
buf.write("\2\2\u0417\u0184\3\2\2\2\u0418\u0419\7\u0bb1\2\2\u0419")
buf.write("\u041a\7\u0bc4\2\2\u041a\u0186\3\2\2\2\u041b\u041c\7\u0bb1")
buf.write("\2\2\u041c\u041d\7\u0bc9\2\2\u041d\u0188\3\2\2\2\u041e")
buf.write("\u041f\7\u0bb1\2\2\u041f\u0420\7\u0bca\2\2\u0420\u018a")
buf.write("\3\2\2\2\u0421\u0422\7\u0bb1\2\2\u0422\u0423\7\u0bcd\2")
buf.write("\2\u0423\u018c\3\2\2\2\u0424\u0425\7\u0bb1\2\2\u0425\u0426")
buf.write("\7\u0bce\2\2\u0426\u018e\3\2\2\2\u0427\u0428\7\u0bb2\2")
buf.write("\2\u0428\u0429\7\u0bc0\2\2\u0429\u0190\3\2\2\2\u042a\u042b")
buf.write("\7\u0bb2\2\2\u042b\u042c\7\u0bc2\2\2\u042c\u0192\3\2\2")
buf.write("\2\u042d\u042e\7\u0bb2\2\2\u042e\u042f\7\u0bc4\2\2\u042f")
buf.write("\u0194\3\2\2\2\u0430\u0431\7\u0bb2\2\2\u0431\u0432\7\u0bc9")
buf.write("\2\2\u0432\u0196\3\2\2\2\u0433\u0434\7\u0bb2\2\2\u0434")
buf.write("\u0435\7\u0bca\2\2\u0435\u0198\3\2\2\2\u0436\u0437\7\u0bb2")
buf.write("\2\2\u0437\u0438\7\u0bcd\2\2\u0438\u019a\3\2\2\2\u0439")
buf.write("\u043a\7\u0bb2\2\2\u043a\u043b\7\u0bce\2\2\u043b\u019c")
buf.write("\3\2\2\2\u043c\u043d\7\u0bb4\2\2\u043d\u043e\7\u0bc0\2")
buf.write("\2\u043e\u019e\3\2\2\2\u043f\u0440\7\u0bb4\2\2\u0440\u0441")
buf.write("\7\u0bc2\2\2\u0441\u01a0\3\2\2\2\u0442\u0443\7\u0bb4\2")
buf.write("\2\u0443\u0444\7\u0bc4\2\2\u0444\u01a2\3\2\2\2\u0445\u0446")
buf.write("\7\u0bb4\2\2\u0446\u0447\7\u0bc9\2\2\u0447\u01a4\3\2\2")
buf.write("\2\u0448\u0449\7\u0bb4\2\2\u0449\u044a\7\u0bca\2\2\u044a")
buf.write("\u01a6\3\2\2\2\u044b\u044c\7\u0bb4\2\2\u044c\u044d\7\u0bcd")
buf.write("\2\2\u044d\u01a8\3\2\2\2\u044e\u044f\7\u0bb4\2\2\u044f")
buf.write("\u0450\7\u0bce\2\2\u0450\u01aa\3\2\2\2\u0451\u0452\7\u0bb7")
buf.write("\2\2\u0452\u0453\7\u0bc0\2\2\u0453\u01ac\3\2\2\2\u0454")
buf.write("\u0455\7\u0bb7\2\2\u0455\u0456\7\u0bc2\2\2\u0456\u01ae")
buf.write("\3\2\2\2\u0457\u0458\7\u0bb7\2\2\u0458\u0459\7\u0bc4\2")
buf.write("\2\u0459\u01b0\3\2\2\2\u045a\u045b\7\u0bb7\2\2\u045b\u045c")
buf.write("\7\u0bc9\2\2\u045c\u01b2\3\2\2\2\u045d\u045e\7\u0bb7\2")
buf.write("\2\u045e\u045f\7\u0bca\2\2\u045f\u01b4\3\2\2\2\u0460\u0461")
buf.write("\7\u0bb7\2\2\u0461\u0462\7\u0bcd\2\2\u0462\u01b6\3\2\2")
buf.write("\2\u0463\u0464\7\u0bb7\2\2\u0464\u0465\7\u0bce\2\2\u0465")
buf.write("\u01b8\3\2\2\2\u0466\u0467\7\u0bb6\2\2\u0467\u0468\7\u0bc0")
buf.write("\2\2\u0468\u01ba\3\2\2\2\u0469\u046a\7\u0bb6\2\2\u046a")
buf.write("\u046b\7\u0bc2\2\2\u046b\u01bc\3\2\2\2\u046c\u046d\7\u0bb6")
buf.write("\2\2\u046d\u046e\7\u0bc4\2\2\u046e\u01be\3\2\2\2\u046f")
buf.write("\u0470\7\u0bb6\2\2\u0470\u0471\7\u0bc9\2\2\u0471\u01c0")
buf.write("\3\2\2\2\u0472\u0473\7\u0bb6\2\2\u0473\u0474\7\u0bca\2")
buf.write("\2\u0474\u01c2\3\2\2\2\u0475\u0476\7\u0bb6\2\2\u0476\u0477")
buf.write("\7\u0bcd\2\2\u0477\u01c4\3\2\2\2\u0478\u0479\7\u0bb6\2")
buf.write("\2\u0479\u047a\7\u0bce\2\2\u047a\u01c6\3\2\2\2\u047b\u047c")
buf.write("\7\u0bb5\2\2\u047c\u047d\7\u0bc0\2\2\u047d\u01c8\3\2\2")
buf.write("\2\u047e\u047f\7\u0bb5\2\2\u047f\u0480\7\u0bc2\2\2\u0480")
buf.write("\u01ca\3\2\2\2\u0481\u0482\7\u0bb5\2\2\u0482\u0483\7\u0bc4")
buf.write("\2\2\u0483\u01cc\3\2\2\2\u0484\u0485\7\u0bb5\2\2\u0485")
buf.write("\u0486\7\u0bc9\2\2\u0486\u01ce\3\2\2\2\u0487\u0488\7\u0bb5")
buf.write("\2\2\u0488\u0489\7\u0bca\2\2\u0489\u01d0\3\2\2\2\u048a")
buf.write("\u048b\7\u0bb5\2\2\u048b\u048c\7\u0bcd\2\2\u048c\u01d2")
buf.write("\3\2\2\2\u048d\u048e\7\u0bb5\2\2\u048e\u048f\7\u0bce\2")
buf.write("\2\u048f\u01d4\3\2\2\2\u0490\u0491\7\u0bb3\2\2\u0491\u0492")
buf.write("\7\u0bc0\2\2\u0492\u01d6\3\2\2\2\u0493\u0494\7\u0bb3\2")
buf.write("\2\u0494\u0495\7\u0bc2\2\2\u0495\u01d8\3\2\2\2\u0496\u0497")
buf.write("\7\u0bb3\2\2\u0497\u0498\7\u0bc4\2\2\u0498\u01da\3\2\2")
buf.write("\2\u0499\u049a\7\u0bb3\2\2\u049a\u049b\7\u0bc9\2\2\u049b")
buf.write("\u01dc\3\2\2\2\u049c\u049d\7\u0bb3\2\2\u049d\u049e\7\u0bca")
buf.write("\2\2\u049e\u01de\3\2\2\2\u049f\u04a0\7\u0bb3\2\2\u04a0")
buf.write("\u04a1\7\u0bcd\2\2\u04a1\u01e0\3\2\2\2\u04a2\u04a3\7\u0bb3")
buf.write("\2\2\u04a3\u04a4\7\u0bce\2\2\u04a4\u01e2\3\2\2\2\u04a5")
buf.write("\u04a6\7\u0bab\2\2\u04a6\u04a7\7\u0bc0\2\2\u04a7\u01e4")
buf.write("\3\2\2\2\u04a8\u04a9\7\u0bab\2\2\u04a9\u04aa\7\u0bc2\2")
buf.write("\2\u04aa\u01e6\3\2\2\2\u04ab\u04ac\7\u0bab\2\2\u04ac\u04ad")
buf.write("\7\u0bc4\2\2\u04ad\u01e8\3\2\2\2\u04ae\u04af\7\u0bab\2")
buf.write("\2\u04af\u04b0\7\u0bc9\2\2\u04b0\u01ea\3\2\2\2\u04b1\u04b2")
buf.write("\7\u0bab\2\2\u04b2\u04b3\7\u0bca\2\2\u04b3\u01ec\3\2\2")
buf.write("\2\u04b4\u04b5\7\u0bab\2\2\u04b5\u04b6\7\u0bcd\2\2\u04b6")
buf.write("\u01ee\3\2\2\2\u04b7\u04b8\7\u0bab\2\2\u04b8\u04b9\7\u0bce")
buf.write("\2\2\u04b9\u01f0\3\2\2\2\u04ba\u04bb\7\"\2\2\u04bb\u01f2")
buf.write("\3\2\2\2\3\2\2")
return buf.getvalue()
class வெண்பாLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
T__34 = 35
T__35 = 36
T__36 = 37
T__37 = 38
T__38 = 39
T__39 = 40
T__40 = 41
T__41 = 42
T__42 = 43
T__43 = 44
T__44 = 45
T__45 = 46
T__46 = 47
T__47 = 48
T__48 = 49
T__49 = 50
T__50 = 51
T__51 = 52
T__52 = 53
T__53 = 54
T__54 = 55
T__55 = 56
T__56 = 57
T__57 = 58
T__58 = 59
T__59 = 60
T__60 = 61
T__61 = 62
T__62 = 63
T__63 = 64
T__64 = 65
T__65 = 66
T__66 = 67
T__67 = 68
T__68 = 69
T__69 = 70
T__70 = 71
T__71 = 72
T__72 = 73
T__73 = 74
T__74 = 75
T__75 = 76
T__76 = 77
T__77 = 78
T__78 = 79
T__79 = 80
T__80 = 81
T__81 = 82
T__82 = 83
T__83 = 84
T__84 = 85
T__85 = 86
T__86 = 87
T__87 = 88
T__88 = 89
T__89 = 90
T__90 = 91
T__91 = 92
T__92 = 93
T__93 = 94
T__94 = 95
T__95 = 96
T__96 = 97
T__97 = 98
T__98 = 99
T__99 = 100
T__100 = 101
T__101 = 102
T__102 = 103
T__103 = 104
T__104 = 105
T__105 = 106
T__106 = 107
T__107 = 108
T__108 = 109
T__109 = 110
T__110 = 111
T__111 = 112
T__112 = 113
T__113 = 114
T__114 = 115
T__115 = 116
T__116 = 117
T__117 = 118
T__118 = 119
T__119 = 120
T__120 = 121
T__121 = 122
T__122 = 123
T__123 = 124
T__124 = 125
T__125 = 126
T__126 = 127
T__127 = 128
T__128 = 129
T__129 = 130
T__130 = 131
T__131 = 132
T__132 = 133
T__133 = 134
T__134 = 135
T__135 = 136
T__136 = 137
T__137 = 138
T__138 = 139
T__139 = 140
T__140 = 141
T__141 = 142
T__142 = 143
T__143 = 144
T__144 = 145
T__145 = 146
T__146 = 147
T__147 = 148
T__148 = 149
T__149 = 150
T__150 = 151
T__151 = 152
T__152 = 153
T__153 = 154
T__154 = 155
T__155 = 156
T__156 = 157
T__157 = 158
T__158 = 159
T__159 = 160
T__160 = 161
T__161 = 162
T__162 = 163
T__163 = 164
T__164 = 165
T__165 = 166
T__166 = 167
T__167 = 168
T__168 = 169
T__169 = 170
T__170 = 171
T__171 = 172
T__172 = 173
T__173 = 174
T__174 = 175
T__175 = 176
T__176 = 177
T__177 = 178
T__178 = 179
T__179 = 180
T__180 = 181
T__181 = 182
T__182 = 183
T__183 = 184
T__184 = 185
T__185 = 186
T__186 = 187
T__187 = 188
T__188 = 189
T__189 = 190
T__190 = 191
T__191 = 192
T__192 = 193
T__193 = 194
T__194 = 195
T__195 = 196
T__196 = 197
T__197 = 198
T__198 = 199
T__199 = 200
T__200 = 201
T__201 = 202
T__202 = 203
T__203 = 204
T__204 = 205
T__205 = 206
T__206 = 207
T__207 = 208
T__208 = 209
T__209 = 210
T__210 = 211
T__211 = 212
T__212 = 213
T__213 = 214
T__214 = 215
T__215 = 216
T__216 = 217
T__217 = 218
T__218 = 219
T__219 = 220
T__220 = 221
T__221 = 222
T__222 = 223
T__223 = 224
T__224 = 225
T__225 = 226
T__226 = 227
T__227 = 228
T__228 = 229
T__229 = 230
T__230 = 231
T__231 = 232
T__232 = 233
T__233 = 234
T__234 = 235
T__235 = 236
T__236 = 237
T__237 = 238
T__238 = 239
T__239 = 240
T__240 = 241
T__241 = 242
T__242 = 243
T__243 = 244
T__244 = 245
T__245 = 246
T__246 = 247
I = 248
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'\n'", "'\u0B95\u0BCD'", "'\u0B9A\u0BCD'", "'\u0B9F\u0BCD'",
"'\u0BA4\u0BCD'", "'\u0BAA\u0BCD'", "'\u0BB1\u0BCD'", "'\u0B9E\u0BCD'",
"'\u0B99\u0BCD'", "'\u0BA3\u0BCD'", "'\u0BA8\u0BCD'", "'\u0BAE\u0BCD'",
"'\u0BA9\u0BCD'", "'\u0BAF\u0BCD'", "'\u0BB0\u0BCD'", "'\u0BB2\u0BCD'",
"'\u0BB5\u0BCD'", "'\u0BB4\u0BCD'", "'\u0BB3\u0BCD'", "'\u0B85'",
"'\u0B87'", "'\u0B89'", "'\u0B8E'", "'\u0B92'", "'\u0B95'",
"'\u0B95\u0BBF'", "'\u0B95\u0BC1'", "'\u0B95\u0BC6'", "'\u0B95\u0BCA'",
"'\u0B99'", "'\u0B99\u0BBF'", "'\u0B99\u0BC1'", "'\u0B99\u0BC6'",
"'\u0B99\u0BCA'", "'\u0B9A'", "'\u0B9A\u0BBF'", "'\u0B9A\u0BC1'",
"'\u0B9A\u0BC6'", "'\u0B9A\u0BCA'", "'\u0B9E'", "'\u0B9E\u0BBF'",
"'\u0B9E\u0BC1'", "'\u0B9E\u0BC6'", "'\u0B9E\u0BCA'", "'\u0B9F'",
"'\u0B9F\u0BBF'", "'\u0B9F\u0BC1'", "'\u0B9F\u0BC6'", "'\u0B9F\u0BCA'",
"'\u0BA3'", "'\u0BA3\u0BBF'", "'\u0BA3\u0BC1'", "'\u0BA3\u0BC6'",
"'\u0BA3\u0BCA'", "'\u0BA4'", "'\u0BA4\u0BBF'", "'\u0BA4\u0BC1'",
"'\u0BA4\u0BC6'", "'\u0BA4\u0BCA'", "'\u0BA8'", "'\u0BA8\u0BBF'",
"'\u0BA8\u0BC1'", "'\u0BA8\u0BC6'", "'\u0BA8\u0BCA'", "'\u0BAA'",
"'\u0BAA\u0BBF'", "'\u0BAA\u0BC1'", "'\u0BAA\u0BC6'", "'\u0BAA\u0BCA'",
"'\u0BAE'", "'\u0BAE\u0BBF'", "'\u0BAE\u0BC1'", "'\u0BAE\u0BC6'",
"'\u0BAE\u0BCA'", "'\u0BAF'", "'\u0BAF\u0BBF'", "'\u0BAF\u0BC1'",
"'\u0BAF\u0BC6'", "'\u0BAF\u0BCA'", "'\u0BB0'", "'\u0BB0\u0BBF'",
"'\u0BB0\u0BC1'", "'\u0BB0\u0BC6'", "'\u0BB0\u0BCA'", "'\u0BB2'",
"'\u0BB2\u0BBF'", "'\u0BB2\u0BC1'", "'\u0BB2\u0BC6'", "'\u0BB2\u0BCA'",
"'\u0BB5'", "'\u0BB5\u0BBF'", "'\u0BB5\u0BC1'", "'\u0BB5\u0BC6'",
"'\u0BB5\u0BCA'", "'\u0BB4'", "'\u0BB4\u0BBF'", "'\u0BB4\u0BC1'",
"'\u0BB4\u0BC6'", "'\u0BB4\u0BCA'", "'\u0BB3'", "'\u0BB3\u0BBF'",
"'\u0BB3\u0BC1'", "'\u0BB3\u0BC6'", "'\u0BB3\u0BCA'", "'\u0BB1'",
"'\u0BB1\u0BBF'", "'\u0BB1\u0BC1'", "'\u0BB1\u0BC6'", "'\u0BB1\u0BCA'",
"'\u0BA9'", "'\u0BA9\u0BBF'", "'\u0BA9\u0BC1'", "'\u0BA9\u0BC6'",
"'\u0BA9\u0BCA'", "'\u0B86'", "'\u0B88'", "'\u0B8A'", "'\u0B8F'",
"'\u0B90'", "'\u0B93'", "'\u0B92\u0BB3'", "'\u0B95\u0BBE'",
"'\u0B95\u0BC0'", "'\u0B95\u0BC2'", "'\u0B95\u0BC7'", "'\u0B95\u0BC8'",
"'\u0B95\u0BCB'", "'\u0B95\u0BCC'", "'\u0B99\u0BBE'", "'\u0B99\u0BC0'",
"'\u0B99\u0BC2'", "'\u0B99\u0BC7'", "'\u0B99\u0BC8'", "'\u0B99\u0BCB'",
"'\u0B99\u0BCC'", "'\u0B9A\u0BBE'", "'\u0B9A\u0BC0'", "'\u0B9A\u0BC2'",
"'\u0B9A\u0BC7'", "'\u0B9A\u0BC8'", "'\u0B9A\u0BCB'", "'\u0B9A\u0BCC'",
"'\u0B9E\u0BBE'", "'\u0B9E\u0BC0'", "'\u0B9E\u0BC2'", "'\u0B9E\u0BC7'",
"'\u0B9E\u0BC8'", "'\u0B9E\u0BCB'", "'\u0B9E\u0BCC'", "'\u0B9F\u0BBE'",
"'\u0B9F\u0BC0'", "'\u0B9F\u0BC2'", "'\u0B9F\u0BC7'", "'\u0B9F\u0BC8'",
"'\u0B9F\u0BCB'", "'\u0B9F\u0BCC'", "'\u0BA3\u0BBE'", "'\u0BA3\u0BC0'",
"'\u0BA3\u0BC2'", "'\u0BA3\u0BC7'", "'\u0BA3\u0BC8'", "'\u0BA3\u0BCB'",
"'\u0BA3\u0BCC'", "'\u0BA4\u0BBE'", "'\u0BA4\u0BC0'", "'\u0BA4\u0BC2'",
"'\u0BA4\u0BC7'", "'\u0BA4\u0BC8'", "'\u0BA4\u0BCB'", "'\u0BA4\u0BCC'",
"'\u0BA8\u0BBE'", "'\u0BA8\u0BC0'", "'\u0BA8\u0BC2'", "'\u0BA8\u0BC7'",
"'\u0BA8\u0BC8'", "'\u0BA8\u0BCB'", "'\u0BA8\u0BCC'", "'\u0BAA\u0BBE'",
"'\u0BAA\u0BC0'", "'\u0BAA\u0BC2'", "'\u0BAA\u0BC7'", "'\u0BAA\u0BC8'",
"'\u0BAA\u0BCB'", "'\u0BAA\u0BCC'", "'\u0BAE\u0BBE'", "'\u0BAE\u0BC0'",
"'\u0BAE\u0BC2'", "'\u0BAE\u0BC7'", "'\u0BAE\u0BC8'", "'\u0BAE\u0BCB'",
"'\u0BAE\u0BCC'", "'\u0BAF\u0BBE'", "'\u0BAF\u0BC0'", "'\u0BAF\u0BC2'",
"'\u0BAF\u0BC7'", "'\u0BAF\u0BC8'", "'\u0BAF\u0BCB'", "'\u0BAF\u0BCC'",
"'\u0BB0\u0BBE'", "'\u0BB0\u0BC0'", "'\u0BB0\u0BC2'", "'\u0BB0\u0BC7'",
"'\u0BB0\u0BC8'", "'\u0BB0\u0BCB'", "'\u0BB0\u0BCC'", "'\u0BB2\u0BBE'",
"'\u0BB2\u0BC0'", "'\u0BB2\u0BC2'", "'\u0BB2\u0BC7'", "'\u0BB2\u0BC8'",
"'\u0BB2\u0BCB'", "'\u0BB2\u0BCC'", "'\u0BB5\u0BBE'", "'\u0BB5\u0BC0'",
"'\u0BB5\u0BC2'", "'\u0BB5\u0BC7'", "'\u0BB5\u0BC8'", "'\u0BB5\u0BCB'",
"'\u0BB5\u0BCC'", "'\u0BB4\u0BBE'", "'\u0BB4\u0BC0'", "'\u0BB4\u0BC2'",
"'\u0BB4\u0BC7'", "'\u0BB4\u0BC8'", "'\u0BB4\u0BCB'", "'\u0BB4\u0BCC'",
"'\u0BB3\u0BBE'", "'\u0BB3\u0BC0'", "'\u0BB3\u0BC2'", "'\u0BB3\u0BC7'",
"'\u0BB3\u0BC8'", "'\u0BB3\u0BCB'", "'\u0BB3\u0BCC'", "'\u0BB1\u0BBE'",
"'\u0BB1\u0BC0'", "'\u0BB1\u0BC2'", "'\u0BB1\u0BC7'", "'\u0BB1\u0BC8'",
"'\u0BB1\u0BCB'", "'\u0BB1\u0BCC'", "'\u0BA9\u0BBE'", "'\u0BA9\u0BC0'",
"'\u0BA9\u0BC2'", "'\u0BA9\u0BC7'", "'\u0BA9\u0BC8'", "'\u0BA9\u0BCB'",
"'\u0BA9\u0BCC'", "' '" ]
symbolicNames = [ "<INVALID>",
"I" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "T__34", "T__35", "T__36", "T__37",
"T__38", "T__39", "T__40", "T__41", "T__42", "T__43",
"T__44", "T__45", "T__46", "T__47", "T__48", "T__49",
"T__50", "T__51", "T__52", "T__53", "T__54", "T__55",
"T__56", "T__57", "T__58", "T__59", "T__60", "T__61",
"T__62", "T__63", "T__64", "T__65", "T__66", "T__67",
"T__68", "T__69", "T__70", "T__71", "T__72", "T__73",
"T__74", "T__75", "T__76", "T__77", "T__78", "T__79",
"T__80", "T__81", "T__82", "T__83", "T__84", "T__85",
"T__86", "T__87", "T__88", "T__89", "T__90", "T__91",
"T__92", "T__93", "T__94", "T__95", "T__96", "T__97",
"T__98", "T__99", "T__100", "T__101", "T__102", "T__103",
"T__104", "T__105", "T__106", "T__107", "T__108", "T__109",
"T__110", "T__111", "T__112", "T__113", "T__114", "T__115",
"T__116", "T__117", "T__118", "T__119", "T__120", "T__121",
"T__122", "T__123", "T__124", "T__125", "T__126", "T__127",
"T__128", "T__129", "T__130", "T__131", "T__132", "T__133",
"T__134", "T__135", "T__136", "T__137", "T__138", "T__139",
"T__140", "T__141", "T__142", "T__143", "T__144", "T__145",
"T__146", "T__147", "T__148", "T__149", "T__150", "T__151",
"T__152", "T__153", "T__154", "T__155", "T__156", "T__157",
"T__158", "T__159", "T__160", "T__161", "T__162", "T__163",
"T__164", "T__165", "T__166", "T__167", "T__168", "T__169",
"T__170", "T__171", "T__172", "T__173", "T__174", "T__175",
"T__176", "T__177", "T__178", "T__179", "T__180", "T__181",
"T__182", "T__183", "T__184", "T__185", "T__186", "T__187",
"T__188", "T__189", "T__190", "T__191", "T__192", "T__193",
"T__194", "T__195", "T__196", "T__197", "T__198", "T__199",
"T__200", "T__201", "T__202", "T__203", "T__204", "T__205",
"T__206", "T__207", "T__208", "T__209", "T__210", "T__211",
"T__212", "T__213", "T__214", "T__215", "T__216", "T__217",
"T__218", "T__219", "T__220", "T__221", "T__222", "T__223",
"T__224", "T__225", "T__226", "T__227", "T__228", "T__229",
"T__230", "T__231", "T__232", "T__233", "T__234", "T__235",
"T__236", "T__237", "T__238", "T__239", "T__240", "T__241",
"T__242", "T__243", "T__244", "T__245", "T__246", "I" ]
grammarFileName = "வெண்பா.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
    """Build the ANTLR-generated lexer and wire up its ATN interpreter.

    Args:
        input: optional antlr4 InputStream holding the text to tokenize.
        output: stream used by the runtime for error reporting
            (defaults to sys.stdout).
    """
    super().__init__(input, output)
    # Generated against ANTLR 4.8; aborts early on a mismatched runtime.
    self.checkVersion("4.8")
    # Tokenize via the class-level deserialized ATN, sharing the DFA and
    # prediction-context caches across all lexer instances.
    self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
    self._actions = None
    self._predicates = None
| 60.076152
| 103
| 0.581243
| 12,603
| 59,956
| 2.685789
| 0.13227
| 0.121481
| 0.075866
| 0.075748
| 0.222843
| 0.150787
| 0.048894
| 0.023457
| 0.020503
| 0.020001
| 0
| 0.373141
| 0.157966
| 59,956
| 997
| 104
| 60.136409
| 0.297118
| 0.001501
| 0
| 0
| 1
| 0.307457
| 0.636531
| 0.556296
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002043
| false
| 0
| 0.004086
| 0
| 0.269663
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71e20ef0f9953479ef0a028ff6d682594d6da0d3
| 1,797
|
py
|
Python
|
anaf/core/migrations/0002_default_timezone_now_fix.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 2
|
2016-03-15T13:17:26.000Z
|
2017-03-22T15:39:01.000Z
|
anaf/core/migrations/0002_default_timezone_now_fix.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 4
|
2021-03-19T21:42:58.000Z
|
2022-03-11T23:13:07.000Z
|
anaf/core/migrations/0002_default_timezone_now_fix.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 4
|
2016-08-31T16:55:41.000Z
|
2020-04-22T18:48:54.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Repoint timestamp defaults at django.utils.timezone.now.

    Alters ``date_created`` on the core models (comment, invitation,
    object, revision, tag, updaterecord) and ``last_access`` on user so
    their default is the timezone-aware ``timezone.now`` callable.
    """

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        # NOTE: the callable itself is passed (not called), so the default
        # is evaluated per-row at save time.
        migrations.AlterField(
            model_name='comment',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='invitation',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='object',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='revision',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='tag',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='updaterecord',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='last_access',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
    ]
| 30.982759
| 74
| 0.588759
| 160
| 1,797
| 6.44375
| 0.25625
| 0.085354
| 0.14743
| 0.196896
| 0.744908
| 0.744908
| 0.744908
| 0.744908
| 0.744908
| 0.744908
| 0
| 0.004
| 0.304396
| 1,797
| 57
| 75
| 31.526316
| 0.8208
| 0.011686
| 0
| 0.666667
| 0
| 0
| 0.083991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e0805a2b3b02514561f5dcd8f05a0cbdd1a18c5e
| 133
|
py
|
Python
|
src/pybel/struct/mutation/inference/__init__.py
|
tehw0lf/pybel
|
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
|
[
"Apache-2.0"
] | null | null | null |
src/pybel/struct/mutation/inference/__init__.py
|
tehw0lf/pybel
|
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
|
[
"Apache-2.0"
] | null | null | null |
src/pybel/struct/mutation/inference/__init__.py
|
tehw0lf/pybel
|
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import protein_rna_origins
from .protein_rna_origins import *
__all__ = protein_rna_origins.__all__
| 19
| 37
| 0.759398
| 18
| 133
| 4.833333
| 0.5
| 0.344828
| 0.586207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.135338
| 133
| 6
| 38
| 22.166667
| 0.747826
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0f09678aab214dd66684b39329d7d8ade31bc6d
| 25
|
py
|
Python
|
tapispy/__init__.py
|
tapis-project/tapispy
|
fc7d5e79f8b5a73fa0517e6129f737dd753c2561
|
[
"Python-2.0",
"OLDAP-2.3"
] | null | null | null |
tapispy/__init__.py
|
tapis-project/tapispy
|
fc7d5e79f8b5a73fa0517e6129f737dd753c2561
|
[
"Python-2.0",
"OLDAP-2.3"
] | null | null | null |
tapispy/__init__.py
|
tapis-project/tapispy
|
fc7d5e79f8b5a73fa0517e6129f737dd753c2561
|
[
"Python-2.0",
"OLDAP-2.3"
] | null | null | null |
from .tapis import Tapis
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cc8556a0dcb1d9ba1d6910dccb228c2c970b4f0
| 6,940
|
py
|
Python
|
tormysql/helpers.py
|
naujoh/TorMySQL
|
09123c58f52d015ef59b490c346b20e72bcd5e30
|
[
"MIT"
] | 340
|
2015-02-02T22:21:22.000Z
|
2022-02-24T06:52:23.000Z
|
tormysql/helpers.py
|
naujoh/TorMySQL
|
09123c58f52d015ef59b490c346b20e72bcd5e30
|
[
"MIT"
] | 42
|
2015-02-12T15:01:44.000Z
|
2020-11-18T02:03:22.000Z
|
tormysql/helpers.py
|
naujoh/TorMySQL
|
09123c58f52d015ef59b490c346b20e72bcd5e30
|
[
"MIT"
] | 78
|
2015-02-12T11:53:15.000Z
|
2022-02-03T07:30:18.000Z
|
# -*- coding: utf-8 -*-
# 16/3/25
# create by: snower
import sys
from . import platform
try:
    from tornado.util import raise_exc_info
except ImportError:
    # Fallback when tornado.util no longer exports raise_exc_info:
    # re-raise a captured sys.exc_info() triple with its original traceback.
    def raise_exc_info(exc_info):
        try:
            raise exc_info[1].with_traceback(exc_info[2])
        finally:
            # Drop the local reference to avoid a traceback reference cycle.
            exc_info = None
from .pool import ConnectionPool as BaseConnectionPool
from . import log
try:
from tornado.gen import Return
except ImportError:
pass
from .util import py3
class TransactionClosedError(Exception):
    """Raised when a Transaction is used after commit()/rollback() closed it."""
class Transaction(object):
    """Explicit transaction bound to one connection checked out of a pool.

    Obtained from ConnectionPool.begin(); commit() or rollback() releases
    the connection back to the pool and invalidates this wrapper.
    """

    def __init__(self, pool, connection):
        # pool: owning ConnectionPool; connection: the checked-out connection
        # this transaction runs on until commit()/rollback().
        self._pool = pool
        self._connection = connection

    def _ensure_conn(self):
        # Guard: reject any use after commit()/rollback() dropped the connection.
        if self._connection is None:
            raise TransactionClosedError("Transaction is closed already.")

    if py3:
        # Python 3: the async/await API is compiled via exec() so this module
        # still *parses* under Python 2 (async syntax would be a SyntaxError).
        exec("""
async def execute(self, query, params=None, cursor_cls=None):
    self._ensure_conn()
    async with self._connection.cursor(cursor_cls) as cursor:
        await cursor.execute(query, params)
    return cursor

async def executemany(self, query, params=None, cursor_cls=None):
    self._ensure_conn()
    async with self._connection.cursor(cursor_cls) as cursor:
        await cursor.executemany(query, params)
    return cursor

async def commit(self):
    self._ensure_conn()
    try:
        await self._connection.commit()
    except:
        exc_info = sys.exc_info()
        self._connection.close(True)
        raise_exc_info(exc_info)
    else:
        self._connection.close()
    finally:
        self._connection = None

async def rollback(self):
    self._ensure_conn()
    try:
        await self._connection.rollback()
    except:
        exc_info = sys.exc_info()
        self._connection.close(True)
        raise_exc_info(exc_info)
    else:
        self._connection.close()
    finally:
        self._connection = None

async def __aenter__(self):
    return self

async def __aexit__(self, exc_type, exc_val, exc_tb):
    if exc_type:
        await self.rollback()
    else:
        await self.commit()
""")
    else:
        @platform.coroutine
        def execute(self, query, params=None, cursor_cls=None):
            # Run one statement; the cursor is closed before being returned,
            # so callers read results/rowcount from the finished cursor.
            self._ensure_conn()
            cursor = self._connection.cursor(cursor_cls)
            try:
                yield cursor.execute(query, params)
            finally:
                yield cursor.close()
            raise Return(cursor)

        @platform.coroutine
        def executemany(self, query, params=None, cursor_cls=None):
            # Batch variant of execute(); same cursor-lifecycle contract.
            self._ensure_conn()
            cursor = self._connection.cursor(cursor_cls)
            try:
                yield cursor.executemany(query, params)
            finally:
                yield cursor.close()
            raise Return(cursor)

        @platform.coroutine
        def commit(self):
            # Commit, then release the connection. On failure the connection
            # is force-closed (close(True)) and the original error re-raised.
            self._ensure_conn()
            try:
                yield self._connection.commit()
            except:
                exc_info = sys.exc_info()
                self._connection.close(True)
                raise_exc_info(exc_info)
            else:
                self._connection.close()
            finally:
                # Either way the transaction is finished after this call.
                self._connection = None

        @platform.coroutine
        def rollback(self):
            # Mirror of commit() for the abort path.
            self._ensure_conn()
            try:
                yield self._connection.rollback()
            except:
                exc_info = sys.exc_info()
                self._connection.close(True)
                raise_exc_info(exc_info)
            else:
                self._connection.close()
            finally:
                self._connection = None

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # NOTE(review): the sync context manager does NOT auto-commit or
            # auto-rollback -- it only warns if the caller forgot to finish
            # the transaction explicitly.
            if self._connection:
                log.get_log().warning("Transaction has not committed or rollbacked %s.", self._connection)

    def __del__(self):
        # Last-ditch cleanup for a leaked transaction: warn and force-close
        # the connection so the pool does not lose a slot.
        if self._connection:
            log.get_log().warning("Transaction has not committed or rollbacked %s.", self._connection)
            self._connection.do_close()
            self._connection = None
class ConnectionPool(BaseConnectionPool):
    """Pool with one-shot query helpers and begin() for explicit transactions."""

    def __init__(self, *args, **kwargs):
        # All pool configuration is handled by the base class.
        super(ConnectionPool, self).__init__(*args, **kwargs)

    if py3:
        # Python 3: async/await API compiled via exec() so the module still
        # parses under Python 2 (same trick as Transaction above).
        exec("""
async def execute(self, query, params=None, cursor_cls=None):
    async with await self.Connection() as connection:
        async with connection.cursor(cursor_cls) as cursor:
            await cursor.execute(query, params)
    return cursor

async def executemany(self, query, params=None, cursor_cls=None):
    async with await self.Connection() as connection:
        async with connection.cursor(cursor_cls) as cursor:
            await cursor.executemany(query, params)
    return cursor

async def begin(self):
    connection = await self.Connection()
    try:
        await connection.begin()
    except:
        exc_info = sys.exc_info()
        connection.close()
        raise_exc_info(exc_info)
    transaction = Transaction(self, connection)
    return transaction
""")
    else:
        @platform.coroutine
        def execute(self, query, params=None, cursor_cls=None):
            # Check out a connection, run one statement, and commit unless the
            # underlying connection is in autocommit mode; on error, roll back
            # (again only when not autocommitting) and re-raise.
            with (yield self.Connection()) as connection:
                cursor = connection.cursor(cursor_cls)
                try:
                    yield cursor.execute(query, params)
                    if not connection._connection.autocommit_mode:
                        yield connection.commit()
                except:
                    exc_info = sys.exc_info()
                    if not connection._connection.autocommit_mode:
                        yield connection.rollback()
                    raise_exc_info(exc_info)
                finally:
                    # Cursor is closed before return; results remain readable.
                    yield cursor.close()
            raise Return(cursor)

        @platform.coroutine
        def executemany(self, query, params=None, cursor_cls=None):
            # Batch variant of execute(); identical commit/rollback contract.
            with (yield self.Connection()) as connection:
                cursor = connection.cursor(cursor_cls)
                try:
                    yield cursor.executemany(query, params)
                    if not connection._connection.autocommit_mode:
                        yield connection.commit()
                except:
                    exc_info = sys.exc_info()
                    if not connection._connection.autocommit_mode:
                        yield connection.rollback()
                    raise_exc_info(exc_info)
                finally:
                    yield cursor.close()
            raise Return(cursor)

        @platform.coroutine
        def begin(self):
            # Start an explicit transaction: BEGIN on a fresh connection and
            # hand ownership to a Transaction wrapper. On failure the
            # connection is returned to the pool and the error re-raised.
            connection = yield self.Connection()
            try:
                yield connection.begin()
            except:
                exc_info = sys.exc_info()
                connection.close()
                raise_exc_info(exc_info)
            transaction = Transaction(self, connection)
            raise Return(transaction)
| 30.844444
| 102
| 0.581988
| 725
| 6,940
| 5.347586
| 0.124138
| 0.06861
| 0.034047
| 0.034821
| 0.766056
| 0.761156
| 0.761156
| 0.75316
| 0.71447
| 0.71447
| 0
| 0.00238
| 0.33415
| 6,940
| 225
| 103
| 30.844444
| 0.836615
| 0.006772
| 0
| 0.764103
| 0
| 0
| 0.314224
| 0.065022
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071795
| false
| 0.010256
| 0.046154
| 0.005128
| 0.169231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1cc959fa7cc8310098bab0e97416519c8240c566
| 216
|
py
|
Python
|
src/middlewares/__init__.py
|
DmitryKovtun/text-splitter
|
a1d97d2d7f74eeb86b185f2356cabb718f010310
|
[
"MIT"
] | 1
|
2020-06-21T02:23:31.000Z
|
2020-06-21T02:23:31.000Z
|
src/middlewares/__init__.py
|
DmitryKovtun/text-splitter
|
a1d97d2d7f74eeb86b185f2356cabb718f010310
|
[
"MIT"
] | null | null | null |
src/middlewares/__init__.py
|
DmitryKovtun/text-splitter
|
a1d97d2d7f74eeb86b185f2356cabb718f010310
|
[
"MIT"
] | 1
|
2020-06-21T02:23:32.000Z
|
2020-06-21T02:23:32.000Z
|
# -*- coding: utf-8 -*-
from .add_rmq_object_to_request_middleware import AddRMQObjectToRequestMiddleware
from .http_proxy_middleware import HttpProxyMiddleware
from .log_errors_middleware import LogErrorsMiddleware
| 43.2
| 81
| 0.865741
| 24
| 216
| 7.416667
| 0.75
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005025
| 0.078704
| 216
| 4
| 82
| 54
| 0.889447
| 0.097222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8087b00d5edf28aebfffc6d1860e32437af15d7
| 180
|
py
|
Python
|
fabfile.py
|
alan-turing-institute/Turing-Fabric-Scaffold
|
cf3fb34ca0cfd29124904cc4f0e3b26f27ae2287
|
[
"MIT"
] | null | null | null |
fabfile.py
|
alan-turing-institute/Turing-Fabric-Scaffold
|
cf3fb34ca0cfd29124904cc4f0e3b26f27ae2287
|
[
"MIT"
] | null | null | null |
fabfile.py
|
alan-turing-institute/Turing-Fabric-Scaffold
|
cf3fb34ca0cfd29124904cc4f0e3b26f27ae2287
|
[
"MIT"
] | null | null | null |
from deploy.intel.intel import intel
from deploy.jade.jade import jade
from deploy.cirrus.cirrus import cirrus
from deploy.builder.build import *
from deploy.queue.queue import *
| 25.714286
| 39
| 0.816667
| 28
| 180
| 5.25
| 0.321429
| 0.340136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 180
| 6
| 40
| 30
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8162f5db979c86f506fb75ca585a040644e49a2
| 18,531
|
py
|
Python
|
example/rcnn/rcnn/symbol.py
|
Abusnina/mxnet
|
7f8d94a24bf64fe0f24712a7952a09725c2df9bd
|
[
"Apache-2.0"
] | 6
|
2017-06-09T02:32:10.000Z
|
2020-03-18T03:17:00.000Z
|
example/rcnn/rcnn/symbol.py
|
Abusnina/mxnet
|
7f8d94a24bf64fe0f24712a7952a09725c2df9bd
|
[
"Apache-2.0"
] | 1
|
2020-01-26T19:53:49.000Z
|
2020-01-26T19:53:49.000Z
|
example/rcnn/rcnn/symbol.py
|
Abusnina/mxnet
|
7f8d94a24bf64fe0f24712a7952a09725c2df9bd
|
[
"Apache-2.0"
] | 14
|
2016-11-18T07:21:41.000Z
|
2019-09-30T08:48:22.000Z
|
import mxnet as mx
import rpn.proposal, rpn.proposal_target
from config import config
def get_vgg_conv(data):
    """
    shared convolutional layers (VGG-16 feature extractor)

    Builds the standard VGG-16 conv stack.  The generated symbol names
    (conv1_1 ... relu5_3, pool1 ... pool4) are identical to the original
    hand-unrolled version, so pretrained VGG-16 parameter files still load.
    No pooling follows group 5, keeping the output feature stride at 16.

    :param data: Symbol (input image batch)
    :return: Symbol (relu5_3, the last shared feature map)
    """
    # VGG-16 configuration: (num_filter, number of 3x3 conv layers) per group.
    vgg_groups = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    sym = data
    for group, (num_filter, num_layers) in enumerate(vgg_groups, start=1):
        for layer in range(1, num_layers + 1):
            sym = mx.symbol.Convolution(
                data=sym, kernel=(3, 3), pad=(1, 1), num_filter=num_filter,
                name="conv%d_%d" % (group, layer))
            sym = mx.symbol.Activation(
                data=sym, act_type="relu", name="relu%d_%d" % (group, layer))
        # 2x2 max-pooling after every group except the last (stride stays 16).
        if group < len(vgg_groups):
            sym = mx.symbol.Pooling(
                data=sym, pool_type="max", kernel=(2, 2), stride=(2, 2),
                name="pool%d" % group)
    return sym
def get_vgg_rcnn(num_classes=21):
    """
    Fast R-CNN training network with VGG 16 conv layers.

    Takes externally supplied proposals (``rois``) plus classification and
    bbox-regression targets, and outputs the classification probability and
    the smooth-L1 bbox loss.

    :param num_classes: used to determine output size (default 21 = 20 VOC classes + background)
    :return: Symbol (group of [cls_prob, bbox_loss])
    """
    data = mx.symbol.Variable(name="data")
    rois = mx.symbol.Variable(name='rois')
    label = mx.symbol.Variable(name='label')
    bbox_target = mx.symbol.Variable(name='bbox_target')
    bbox_inside_weight = mx.symbol.Variable(name='bbox_inside_weight')
    bbox_outside_weight = mx.symbol.Variable(name='bbox_outside_weight')
    # reshape input: flatten the per-image leading dimension into the ROI axis
    rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
    label = mx.symbol.Reshape(data=label, shape=(-1, ), name='label_reshape')
    bbox_target = mx.symbol.Reshape(data=bbox_target, shape=(-1, 4 * num_classes), name='bbox_target_reshape')
    bbox_inside_weight = mx.symbol.Reshape(data=bbox_inside_weight, shape=(-1, 4 * num_classes), name='bbox_inside_weight_reshape')
    bbox_outside_weight = mx.symbol.Reshape(data=bbox_outside_weight, shape=(-1, 4 * num_classes), name='bbox_outside_weight_reshape')
    # shared convolutional layers
    relu5_3 = get_vgg_conv(data)
    # Fast R-CNN head; spatial_scale = 0.0625 = 1/16 (VGG conv5 feature stride)
    pool5 = mx.symbol.ROIPooling(
        name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=0.0625)
    # group 6
    flatten = mx.symbol.Flatten(data=pool5, name="flatten")
    fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
    relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
    drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
    # group 7
    fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
    relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
    drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
    # classification
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
    cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label)
    # bounding box regression: smooth-L1 on inside-weighted residuals,
    # scaled per-element by the outside weights
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
    bbox_loss_ = bbox_outside_weight * \
        mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0,
                            data=bbox_inside_weight * (bbox_pred - bbox_target))
    bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_)
    # reshape output back to a per-image leading dimension
    cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
    bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
    # group output
    group = mx.symbol.Group([cls_prob, bbox_loss])
    return group
def get_vgg_rcnn_test(num_classes=21):
    """
    Fast R-CNN inference network with VGG conv layers.

    Same head as :func:`get_vgg_rcnn` but without labels/targets: the
    softmax has no label input and the raw ``bbox_pred`` regression output
    is returned instead of a loss.

    :param num_classes: used to determine output size
    :return: Symbol (group of [cls_prob, bbox_pred])
    """
    data = mx.symbol.Variable(name="data")
    rois = mx.symbol.Variable(name='rois')
    # reshape rois
    rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
    # shared convolutional layer
    relu5_3 = get_vgg_conv(data)
    # Fast R-CNN head; spatial_scale = 0.0625 = 1/16 (VGG conv5 feature stride)
    pool5 = mx.symbol.ROIPooling(
        name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=0.0625)
    # group 6
    flatten = mx.symbol.Flatten(data=pool5, name="flatten")
    fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
    relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
    drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
    # group 7
    fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
    relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
    drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
    # classification (no label at test time)
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
    cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score)
    # bounding box regression
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
    # reshape output to per-image leading dimension
    cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
    bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
    # group output
    group = mx.symbol.Group([cls_prob, bbox_pred])
    return group
def get_vgg_rpn(num_classes=21, num_anchors=9):
    """
    Region Proposal Network (training) with VGG conv layers.

    :param num_classes: used to determine output size
        (NOTE(review): not referenced in this function's body — kept for
        signature symmetry with the other builders; verify before removing)
    :param num_anchors: used to determine output size (2*num_anchors
        objectness scores and 4*num_anchors box deltas per location)
    :return: Symbol (group of [cls_prob, bbox_loss])
    """
    data = mx.symbol.Variable(name="data")
    label = mx.symbol.Variable(name='label')
    bbox_target = mx.symbol.Variable(name='bbox_target')
    bbox_inside_weight = mx.symbol.Variable(name='bbox_inside_weight')
    bbox_outside_weight = mx.symbol.Variable(name='bbox_outside_weight')
    # shared convolutional layers
    relu5_3 = get_vgg_conv(data)
    # RPN: 3x3 conv followed by two sibling 1x1 convs (score / bbox delta)
    rpn_conv = mx.symbol.Convolution(
        data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
    rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
    rpn_cls_score = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
    rpn_bbox_pred = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
    # prepare rpn data: expose a 2-way (fg/bg) axis for the softmax
    rpn_cls_score_reshape = mx.symbol.Reshape(
        data=rpn_cls_score, shape=(0, 2, -1), name="rpn_cls_score_reshape")
    # classification; ignore_label=-1 skips anchors excluded from sampling
    cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=label, multi_output=True,
                                       normalization='valid', use_ignore=True, ignore_label=-1, name="cls_prob")
    # bounding box regression (smooth-L1, scalar=3.0)
    bbox_loss_ = bbox_outside_weight * \
        mx.symbol.smooth_l1(name='bbox_loss_', scalar=3.0,
                            data=bbox_inside_weight * (rpn_bbox_pred - bbox_target))
    bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_)
    # group output
    group = mx.symbol.Group([cls_prob, bbox_loss])
    return group
def get_vgg_rpn_test(num_classes=21, num_anchors=9):
    """
    Region Proposal Network (inference) with VGG conv layers.

    :param num_classes: used to determine output size
        (NOTE(review): not referenced in this function's body)
    :param num_anchors: used to determine output size
    :return: Symbol — custom 'proposal' op output with output_score=True
        (per the trailing comments: group[0] = rois, group[1] = scores)
    """
    data = mx.symbol.Variable(name="data")
    im_info = mx.symbol.Variable(name="im_info")
    # shared convolutional layers
    relu5_3 = get_vgg_conv(data)
    # RPN
    rpn_conv = mx.symbol.Convolution(
        data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
    rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
    rpn_cls_score = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
    rpn_bbox_pred = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
    # ROI Proposal: channel-wise softmax over the 2-way fg/bg axis,
    # then reshape scores back to per-anchor channel layout
    rpn_cls_score_reshape = mx.symbol.Reshape(
        data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
    rpn_cls_prob = mx.symbol.SoftmaxActivation(
        data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
    rpn_cls_prob_reshape = mx.symbol.Reshape(
        data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
    # custom python op registered as 'proposal' (see rpn.proposal import)
    group = mx.symbol.Custom(
        cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
        op_type='proposal', feat_stride=16, scales=(8, 16, 32), ratios=(0.5, 1, 2), output_score=True)
    # rois = group[0]
    # score = group[1]
    return group
def get_vgg_test(num_classes=21, num_anchors=9):
    """
    Faster R-CNN inference network with VGG 16 conv layers.

    End-to-end test graph: shared VGG features feed an RPN whose proposals
    are ROI-pooled into the Fast R-CNN head.

    :param num_classes: used to determine output size
    :param num_anchors: used to determine output size
    :return: Symbol (group of [rois, cls_prob, bbox_pred])
    """
    data = mx.symbol.Variable(name="data")
    im_info = mx.symbol.Variable(name="im_info")
    # shared convolutional layers
    relu5_3 = get_vgg_conv(data)
    # RPN
    rpn_conv = mx.symbol.Convolution(
        data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
    rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
    rpn_cls_score = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
    rpn_bbox_pred = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
    # ROI Proposal (same pattern as get_vgg_rpn_test, without output_score)
    rpn_cls_score_reshape = mx.symbol.Reshape(
        data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
    rpn_cls_prob = mx.symbol.SoftmaxActivation(
        data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
    rpn_cls_prob_reshape = mx.symbol.Reshape(
        data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
    rois = mx.symbol.Custom(
        cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
        op_type='proposal', feat_stride=16, scales=(8, 16, 32), ratios=(0.5, 1, 2))
    # Fast R-CNN head; spatial_scale = 0.0625 = 1/16 (VGG conv5 feature stride)
    pool5 = mx.symbol.ROIPooling(
        name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=0.0625)
    # group 6
    flatten = mx.symbol.Flatten(data=pool5, name="flatten")
    fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
    relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
    drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
    # group 7
    fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
    relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
    drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
    # classification (no label at test time)
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
    cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score)
    # bounding box regression
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
    # reshape output to per-image leading dimension
    cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
    bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
    # group output
    group = mx.symbol.Group([rois, cls_prob, bbox_pred])
    return group
def get_faster_rcnn(num_classes=21, num_anchors=9):
    """
    Faster R-CNN end-to-end training network with VGG 16 conv layers.

    Joint graph: RPN losses plus the Fast R-CNN head fed by proposals from
    the custom 'proposal' / 'proposal_target' python ops.

    :param num_classes: used to determine output size
    :param num_anchors: used to determine output size
    :return: Symbol (group of [BlockGrad(rois[1]), rpn_cls_loss,
        rpn_bbox_loss, cls_prob, bbox_pred]); rois[1] is used for evaluation
    """
    data = mx.symbol.Variable(name="data")
    im_info = mx.symbol.Variable(name="im_info")
    # label
    gt_boxes = mx.symbol.Variable(name="gt_boxes")
    label = mx.symbol.Variable(name='label')
    bbox_target = mx.symbol.Variable(name='bbox_target')
    bbox_inside_weight = mx.symbol.Variable(name='bbox_inside_weight')
    bbox_outside_weight = mx.symbol.Variable(name='bbox_outside_weight')
    gt_boxes = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
    relu5_3 = get_vgg_conv(data)
    ## RPN
    rpn_conv = mx.symbol.Convolution(
        data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
    rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
    rpn_cls_score = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
    rpn_bbox_pred = mx.symbol.Convolution(
        data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
    # prepare rpn data
    rpn_cls_score_reshape = mx.symbol.Reshape(
        data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
    # classification; ignore_label=-1 skips unsampled anchors
    rpn_cls_loss = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=label, multi_output=True,
                                           normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_loss")
    # bounding box regression (RPN branch)
    rpn_bbox_loss_ = bbox_outside_weight * \
        mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0,
                            data=bbox_inside_weight * (rpn_bbox_pred - bbox_target))
    rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_)
    rpn_cls_prob = mx.symbol.SoftmaxActivation(
        data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
    rpn_cls_prob_reshape = mx.symbol.Reshape(
        data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
    rpn_roi = mx.symbol.Custom(
        cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rpn_rois',
        op_type='proposal', feat_stride=16, scales=(8, 16, 32), ratios=(0.5, 1, 2), is_train=True)  # TODO(be careful of cls_prob)
    # proposal_target: samples rois and computes per-roi cls/bbox targets.
    # NOTE(review): rois[0..4] indexing below presumably follows the
    # proposal_target op's output order (rois, labels, bbox_targets,
    # inside_weights, outside_weights) — confirm against rpn.proposal_target.
    rois = mx.symbol.Custom(
        rpn_roi=rpn_roi, gt_boxes=gt_boxes, name='rois', op_type='proposal_target',
        num_classes=num_classes, is_train=True)  #
    # R-CNN head; spatial_scale = 0.0625 = 1/16 (VGG conv5 feature stride)
    pool5 = mx.symbol.ROIPooling(
        name='roi_pool5', data=relu5_3, rois=rois[0], pooled_size=(7, 7), spatial_scale=0.0625)
    # group 6
    flatten = mx.symbol.Flatten(data=pool5, name="flatten")
    fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
    relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
    drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
    # group 7
    fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
    relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
    drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
    # classification
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
    cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=rois[1], normalization='batch')
    # bounding box regression (R-CNN branch)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
    bbox_loss_ = rois[4] * \
        mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0,
                            data=rois[3] * (bbox_pred - rois[2]))
    bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_SIZE)
    # reshape output
    cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.IMS_PER_BATCH, -1, num_classes), name='cls_prob_reshape')
    # NOTE(review): this reshapes bbox_loss but binds/outputs it as
    # bbox_pred ('bbox_pred_reshape') — looks intentional (the loss is what
    # training consumes) but the naming is misleading; confirm before relying
    # on the 'bbox_pred' output at train time.
    bbox_pred = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.IMS_PER_BATCH, -1, 4 * num_classes), name='bbox_pred_reshape')
    # group output
    group = mx.symbol.Group([mx.sym.BlockGrad(rois[1]), rpn_cls_loss, rpn_bbox_loss, cls_prob, bbox_pred])  # rois[1] is used for evaluation
    return group
| 47.883721
| 140
| 0.680104
| 2,829
| 18,531
| 4.218452
| 0.056911
| 0.103234
| 0.039802
| 0.048182
| 0.895341
| 0.801911
| 0.786995
| 0.782386
| 0.770153
| 0.757248
| 0
| 0.04519
| 0.176029
| 18,531
| 387
| 141
| 47.883721
| 0.736394
| 0.089634
| 0
| 0.607438
| 0
| 0
| 0.097728
| 0.008234
| 0
| 0
| 0
| 0.002584
| 0
| 1
| 0.028926
| false
| 0
| 0.012397
| 0
| 0.070248
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e81a08f3340fbc962a95ea79cfc1753dcd7baa14
| 4,208
|
py
|
Python
|
botWhatsapp.py
|
bigpedrolucas/BotWhatsapp-Python
|
d7aab4891ee9580a86d60d3d939755635f7d3b01
|
[
"MIT"
] | null | null | null |
botWhatsapp.py
|
bigpedrolucas/BotWhatsapp-Python
|
d7aab4891ee9580a86d60d3d939755635f7d3b01
|
[
"MIT"
] | null | null | null |
botWhatsapp.py
|
bigpedrolucas/BotWhatsapp-Python
|
d7aab4891ee9580a86d60d3d939755635f7d3b01
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# Load the contact list (one contact name per line).
# Fix: the original opened the file and never closed it (the handle was
# lost when the name was rebound); a with-block closes it deterministically.
with open("./contatos.txt", 'r', encoding='UTF-8') as arquivo_contatos:
    contatos = arquivo_contatos.read().split('\n')

# Interactive menu: choose what the bot should send.
print('\n--- ROBÔ WHATSAPP PARA ENVIO DE MENSAGENS ---')
print('Enviar texto [1]\nEnviar imagem [2]\nEnviar texto e imagem [3]')
opcao = input('\nDigite a sua opção: ')
class WhatsappBot:
    """Selenium-driven WhatsApp Web bot that sends a text message, a media
    file, or both to every contact listed in the module-level ``contatos``.

    Timings (``time.sleep``) are deliberate waits for WhatsApp Web's UI to
    load/react and are kept identical to the original flow.
    """

    def __init__(self):
        # Bug fix: the original created `options` with 'lang=pt-br' but then
        # passed a second, freshly-created ChromeOptions (`op`) to the
        # driver, silently discarding the language option.  Use the
        # configured options object.
        options = webdriver.ChromeOptions()
        options.add_argument('lang=pt-br')
        ser = Service("./chromedriver.exe")
        self.driver = webdriver.Chrome(service=ser, options=options)

    # --- private helpers (shared steps factored out of the three senders) ---

    def _abrir_whatsapp(self):
        # Open WhatsApp Web and wait for the QR-code login / page load.
        self.driver.get('https://web.whatsapp.com')
        time.sleep(10)

    def _abrir_conversa(self, contato):
        # Type the contact name into the search box and open the chat.
        pesquisa = self.driver.find_element(By.CLASS_NAME, "_13NKt")
        time.sleep(2)
        pesquisa.click()
        time.sleep(2)
        pesquisa.send_keys(contato)
        time.sleep(2)
        pesquisa.send_keys(Keys.ENTER)

    def _clicar_enviar(self):
        # Click the send (paper-plane) button.
        botao_enviar = self.driver.find_element(By.XPATH,
                                                "//span[@data-icon='send']")
        time.sleep(3)
        botao_enviar.click()

    def _digitar_mensagem(self, mensagem):
        # Type the message into the chat box and send it.
        chat_box = self.driver.find_element(By.CLASS_NAME, 'p3_M1')
        time.sleep(3)
        chat_box.click()
        time.sleep(3)
        chat_box.send_keys(mensagem)
        self._clicar_enviar()

    def _anexar_midia(self, midia):
        # Open the attachment (clip) menu and upload the media file.
        self.driver.find_element(By.CSS_SELECTOR, "span[data-icon='clip']").click()
        attach = self.driver.find_element(By.XPATH, '//input[@accept="image/*,video/mp4,video/3gpp,video/quicktime"]')
        time.sleep(3)
        attach.send_keys(midia)
        time.sleep(5)
        self._clicar_enviar()

    @staticmethod
    def _ler_mensagem():
        # Read the message text; `with` closes the file (the original leaked
        # the handle).
        with open("./mensagem.txt", 'r', encoding='UTF-8') as arquivo:
            return arquivo.read()

    # --- public API (names and behavior unchanged) ---

    def EnviarMensagens(self):
        """Send the text from ./mensagem.txt to every contact."""
        mensagem = self._ler_mensagem()
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._digitar_mensagem(mensagem)
            time.sleep(10)

    def EnviarMidia(self):
        """Send the hard-coded media file to every contact."""
        midia = "C:\\caminho\\para\\seu\\arquivo\\de\\midia.jpeg"
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._anexar_midia(midia)
            time.sleep(10)

    def EnviarAmbos(self):
        """Send the media file followed by the text message to every contact."""
        midia = "C:\\caminho\\para\\seu\\arquivo\\de\\midia.jpeg"
        mensagem = self._ler_mensagem()
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._anexar_midia(midia)
            time.sleep(5)
            self._digitar_mensagem(mensagem)
            time.sleep(10)
# Dispatch the chosen menu option to the matching bot action.
bot = WhatsappBot()
acoes = {
    "1": bot.EnviarMensagens,
    "2": bot.EnviarMidia,
    "3": bot.EnviarAmbos,
}
acao = acoes.get(opcao)
if acao is not None:
    acao()
| 37.571429
| 122
| 0.576046
| 502
| 4,208
| 4.721116
| 0.213147
| 0.106329
| 0.076793
| 0.11519
| 0.728692
| 0.721941
| 0.721941
| 0.721941
| 0.721941
| 0.721941
| 0
| 0.018899
| 0.28327
| 4,208
| 112
| 123
| 37.571429
| 0.76691
| 0
| 0
| 0.730769
| 0
| 0
| 0.163459
| 0.086481
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.048077
| 0
| 0.096154
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c1ccdf87fce069d2f9e9911d7264f8abe0d55d0
| 705
|
py
|
Python
|
Curso_Gustavo_Guanabara/Exercicio028_melhorado.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercicio028_melhorado.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercicio028_melhorado.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
# Number-guessing game: the program picks a number in [0, 5) and the user
# guesses until correct; `c` counts the attempts.
import random
n = random.randrange(0,5)
n2 = int(input('Digite um número = '))
c = 1
# First guess is correct: report success (the while loop below is skipped).
if n2 == n:
    print('Você acertou o número que o pc escolheu ')
    print('Seu numero foi {} o que do pc {} '.format(n2, n))
    print('Foi necessario apenas {} tentativa para acertar'.format(c))
while n != n2:
    # NOTE(review): this reveals the pc's number before re-rolling it —
    # possibly intentional (a new number is drawn each round), but worth
    # confirming with the exercise statement.
    print('Você errou o número que o pc escolheu ')
    print('Seu numero foi {} o que do pc {} '.format(n2, n))
    # Draw a fresh target and read a new guess each round.
    n = random.randrange(0, 5)
    n2 = int(input('Digite um número = '))
    c += 1
    if n == n2:
        print('Você acertou o número que o pc escolheu ')
        print('Seu numero foi {} o que do pc {} '.format(n2, n))
        print('Foi necessario {} tentativas para acertar'.format(c))
| 35.25
| 70
| 0.604255
| 114
| 705
| 3.736842
| 0.307018
| 0.028169
| 0.056338
| 0.077465
| 0.762911
| 0.762911
| 0.762911
| 0.762911
| 0.762911
| 0.762911
| 0
| 0.026415
| 0.248227
| 705
| 20
| 71
| 35.25
| 0.777358
| 0
| 0
| 0.5
| 0
| 0
| 0.485836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.444444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1c6001a93ff50185eb1e9d77bbc9eda6f12df0df
| 7,395
|
py
|
Python
|
tests/contracts/KT1TTDdZqEcVQoPciqLWX5aT9GmfCiW1WDGV/test_michelson_coding_KT1TTD.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
tests/contracts/KT1TTDdZqEcVQoPciqLWX5aT9GmfCiW1WDGV/test_michelson_coding_KT1TTD.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/contracts/KT1TTDdZqEcVQoPciqLWX5aT9GmfCiW1WDGV/test_michelson_coding_KT1TTD.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1TTD(TestCase):
    """Michelson <-> Micheline coding round-trip tests for contract
    KT1TTDdZqEcVQoPciqLWX5aT9GmfCiW1WDGV.

    The original file repeated the same three-assertion templates 21 times;
    the shared logic is factored into three private helpers.  Every public
    test method name is unchanged, so unittest discovery and external test
    selection (``-k`` filters, CI reports) behave exactly as before.
    """

    # Directory (relative to the test data root) holding this contract's
    # .json (Micheline) and .tz (Michelson) fixtures.
    CONTRACT_DIR = 'contracts/KT1TTDdZqEcVQoPciqLWX5aT9GmfCiW1WDGV'

    def setUp(self):
        self.maxDiff = None

    def _check_parse(self, name):
        # Parsing the .tz source must produce the stored Micheline JSON.
        expected = get_data(path='%s/%s.json' % (self.CONTRACT_DIR, name))
        actual = michelson_to_micheline(get_data(
            path='%s/%s.tz' % (self.CONTRACT_DIR, name)))
        self.assertEqual(expected, actual)

    def _check_format(self, name):
        # Formatting the Micheline JSON (inline) must reproduce the .tz source.
        expected = get_data(path='%s/%s.tz' % (self.CONTRACT_DIR, name))
        actual = micheline_to_michelson(get_data(
            path='%s/%s.json' % (self.CONTRACT_DIR, name)),
            inline=True)
        self.assertEqual(expected, actual)

    def _check_inverse(self, name):
        # format -> parse must be the identity on the Micheline JSON.
        expected = get_data(path='%s/%s.json' % (self.CONTRACT_DIR, name))
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_code_KT1TTD(self):
        self._check_parse('code_KT1TTD')

    def test_michelson_format_code_KT1TTD(self):
        self._check_format('code_KT1TTD')

    def test_michelson_inverse_code_KT1TTD(self):
        self._check_inverse('code_KT1TTD')

    def test_michelson_parse_storage_KT1TTD(self):
        self._check_parse('storage_KT1TTD')

    def test_michelson_format_storage_KT1TTD(self):
        self._check_format('storage_KT1TTD')

    def test_michelson_inverse_storage_KT1TTD(self):
        self._check_inverse('storage_KT1TTD')

    def test_michelson_parse_parameter_ooySj1(self):
        self._check_parse('parameter_ooySj1')

    def test_michelson_format_parameter_ooySj1(self):
        self._check_format('parameter_ooySj1')

    def test_michelson_inverse_parameter_ooySj1(self):
        self._check_inverse('parameter_ooySj1')

    def test_michelson_parse_parameter_ontKCo(self):
        self._check_parse('parameter_ontKCo')

    def test_michelson_format_parameter_ontKCo(self):
        self._check_format('parameter_ontKCo')

    def test_michelson_inverse_parameter_ontKCo(self):
        self._check_inverse('parameter_ontKCo')

    def test_michelson_parse_parameter_ooJ4W9(self):
        self._check_parse('parameter_ooJ4W9')

    def test_michelson_format_parameter_ooJ4W9(self):
        self._check_format('parameter_ooJ4W9')

    def test_michelson_inverse_parameter_ooJ4W9(self):
        self._check_inverse('parameter_ooJ4W9')

    def test_michelson_parse_parameter_op2cp1(self):
        self._check_parse('parameter_op2cp1')

    def test_michelson_format_parameter_op2cp1(self):
        self._check_format('parameter_op2cp1')

    def test_michelson_inverse_parameter_op2cp1(self):
        self._check_inverse('parameter_op2cp1')

    def test_michelson_parse_parameter_onxPie(self):
        self._check_parse('parameter_onxPie')

    def test_michelson_format_parameter_onxPie(self):
        self._check_format('parameter_onxPie')

    def test_michelson_inverse_parameter_onxPie(self):
        self._check_inverse('parameter_onxPie')
| 46.509434
| 90
| 0.734145
| 692
| 7,395
| 7.534682
| 0.060694
| 0.048331
| 0.07384
| 0.134254
| 0.953203
| 0.953203
| 0.953203
| 0.953203
| 0.937476
| 0.937476
| 0
| 0.032943
| 0.191346
| 7,395
| 158
| 91
| 46.803797
| 0.838963
| 0
| 0
| 0.631579
| 0
| 0
| 0.31332
| 0.31332
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.165414
| false
| 0
| 0.030075
| 0
| 0.203008
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c8b69038e81f2ece7527dc2d825ecb2e09895fc
| 277
|
py
|
Python
|
nlpatl/sampling/uncertainty/__init__.py
|
dumpmemory/nlpatl
|
59209242d1ac26714b11b86261070ac50cc90432
|
[
"MIT"
] | 18
|
2021-11-29T06:43:46.000Z
|
2022-03-29T09:58:32.000Z
|
nlpatl/sampling/uncertainty/__init__.py
|
dumpmemory/nlpatl
|
59209242d1ac26714b11b86261070ac50cc90432
|
[
"MIT"
] | null | null | null |
nlpatl/sampling/uncertainty/__init__.py
|
dumpmemory/nlpatl
|
59209242d1ac26714b11b86261070ac50cc90432
|
[
"MIT"
] | 1
|
2021-11-29T06:43:47.000Z
|
2021-11-29T06:43:47.000Z
|
from nlpatl.sampling.uncertainty.entropy import EntropySampling
from nlpatl.sampling.uncertainty.least_confidence import LeastConfidenceSampling
from nlpatl.sampling.uncertainty.margin import MarginSampling
from nlpatl.sampling.uncertainty.mismatch import MismatchSampling
| 55.4
| 81
| 0.884477
| 29
| 277
| 8.413793
| 0.482759
| 0.163934
| 0.295082
| 0.47541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072202
| 277
| 4
| 82
| 69.25
| 0.949416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
98fad854692537b76eb3eb3ab2f8aa9f208356e4
| 279
|
py
|
Python
|
Ska/engarchive/tests/test_units_reversed.py
|
jmodelcxc/eng_archive
|
04017f062ef1ab023a58b6d9e5bde19992c8398c
|
[
"BSD-3-Clause"
] | null | null | null |
Ska/engarchive/tests/test_units_reversed.py
|
jmodelcxc/eng_archive
|
04017f062ef1ab023a58b6d9e5bde19992c8398c
|
[
"BSD-3-Clause"
] | 64
|
2015-02-25T20:25:41.000Z
|
2022-03-26T23:39:03.000Z
|
Ska/engarchive/tests/test_units_reversed.py
|
jmodelcxc/eng_archive
|
04017f062ef1ab023a58b6d9e5bde19992c8398c
|
[
"BSD-3-Clause"
] | 4
|
2016-01-28T19:48:53.000Z
|
2021-02-04T16:38:03.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Import in reverse order from test_units.py
from .. import fetch as fetch_cxc # noqa
from .. import fetch_eng as fetch_eng # noqa
from .. import fetch_sci as fetch_sci # noqa
from .test_units import * # noqa
| 31
| 63
| 0.741935
| 47
| 279
| 4.255319
| 0.510638
| 0.15
| 0.225
| 0.19
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004444
| 0.193548
| 279
| 8
| 64
| 34.875
| 0.884444
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98fbc3788bec13e62b4ea138f900e10a1735af72
| 13,382
|
py
|
Python
|
tests/test_androidtv_async.py
|
tungmeister/python-androidtv
|
7d496ced19ffe630962c4b210113db4b6b567fcc
|
[
"MIT"
] | 1
|
2022-03-13T16:13:13.000Z
|
2022-03-13T16:13:13.000Z
|
tests/test_androidtv_async.py
|
tungmeister/python-androidtv
|
7d496ced19ffe630962c4b210113db4b6b567fcc
|
[
"MIT"
] | null | null | null |
tests/test_androidtv_async.py
|
tungmeister/python-androidtv
|
7d496ced19ffe630962c4b210113db4b6b567fcc
|
[
"MIT"
] | null | null | null |
import asyncio
import sys
import unittest
from unittest.mock import patch
sys.path.insert(0, "..")
from androidtv import constants
from androidtv.androidtv.androidtv_async import AndroidTVAsync
from . import async_patchers
from .async_wrapper import awaiter
from .patchers import patch_calls
STREAM_MUSIC_EMPTY = "- STREAM_MUSIC:\n \n- STREAM"
STREAM_MUSIC_OFF = """- STREAM_MUSIC:
Muted: false
Min: 0
Max: 60
Current: 2 (speaker): 20, 40000 (hmdi_arc): 27, 40000000 (default): 15
Devices: speaker
- STREAM_ALARM:
Muted: true
Min: 0
Max: 7
Current: 2 (speaker): 3, 40000 (hmdi_arc): 3, 40000000 (default): 2
Devices: speaker"""
STREAM_MUSIC_NO_VOLUME = """- STREAM_MUSIC:
Muted: false
Min: 0
Max: 60
Devices: speaker
- STREAM_ALARM:
Muted: true
Min: 0
Max: 7
Current: 2 (speaker): 3, 40000 (hmdi_arc): 3, 40000000 (default): 2
Devices: speaker"""
STREAM_MUSIC_ON = """- STREAM_MUSIC:
Muted: false
Min: 0
Max: 60
Current: 2 (speaker): 20, 40000 (hmdi_arc): 22, 40000000 (default): 15
Devices: hmdi_arc
- STREAM_ALARM:
Muted: false
Min: 0
Max: 7
Current: 2 (speaker): 3, 40000 (hmdi_arc): 3, 40000000 (default): 2
Devices: speaker"""
class TestAndroidTVAsyncPython(unittest.TestCase):
    """Tests for ``AndroidTVAsync`` against the pure-python ADB backend.

    ``PATCH_KEY`` and ``ADB_ATTR`` parameterize which patched backend is
    used and which attribute of ``self.atv._adb`` holds the mock device;
    a subclass overrides them to re-run these tests against another backend.
    """
    # Key selecting the backend inside the patcher dicts from async_patchers.
    PATCH_KEY = "python"
    # Attribute on self.atv._adb exposing the mock device's last shell_cmd.
    ADB_ATTR = "_adb"
    @awaiter
    async def setUp(self):
        """Create a connected ``AndroidTVAsync`` with all ADB I/O patched out."""
        with async_patchers.PATCH_ADB_DEVICE_TCP, async_patchers.patch_connect(True)[
            self.PATCH_KEY
        ], async_patchers.patch_shell("")[self.PATCH_KEY]:
            self.atv = AndroidTVAsync("HOST", 5555)
            await self.atv.adb_connect()
    @awaiter
    async def test_turn_on_off(self):
        """Test that the ``AndroidTVAsync.turn_on`` and ``AndroidTVAsync.turn_off`` methods work correctly."""
        with async_patchers.patch_connect(True)[self.PATCH_KEY], async_patchers.patch_shell("")[self.PATCH_KEY]:
            await self.atv.turn_on()
            # turn_on: screen-on check OR'd with a KEY_POWER keyevent.
            self.assertEqual(
                getattr(self.atv._adb, self.ADB_ATTR).shell_cmd,
                constants.CMD_SCREEN_ON + " || input keyevent {0}".format(constants.KEY_POWER),
            )
            await self.atv.turn_off()
            # turn_off: screen-on check AND'd with a KEY_POWER keyevent.
            self.assertEqual(
                getattr(self.atv._adb, self.ADB_ATTR).shell_cmd,
                constants.CMD_SCREEN_ON + " && input keyevent {0}".format(constants.KEY_POWER),
            )
    @awaiter
    async def test_start_intent(self):
        """Test that the ``start_intent`` method works correctly."""
        with async_patchers.patch_connect(True)[self.PATCH_KEY], async_patchers.patch_shell("")[self.PATCH_KEY]:
            await self.atv.start_intent("TEST")
            self.assertEqual(
                getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "am start -a android.intent.action.VIEW -d TEST"
            )
    @awaiter
    async def test_running_apps(self):
        """Check that the ``running_apps`` property works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            with patch_calls(self.atv, self.atv._running_apps) as patched:
                await self.atv.running_apps()
            assert patched.called
    @awaiter
    async def test_stream_music_properties(self):
        """Check that the ``stream_music_properties`` method works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            # stream_music_properties() should hit all four private getters.
            with patch_calls(self.atv, self.atv._audio_output_device) as audio_output_device, patch_calls(
                self.atv, self.atv._is_volume_muted
            ) as is_volume_muted, patch_calls(self.atv, self.atv._volume) as volume, patch_calls(
                self.atv, self.atv._volume_level
            ) as volume_level:
                await self.atv.stream_music_properties()
            assert audio_output_device.called
            assert is_volume_muted.called
            assert volume.called
            assert volume_level.called
            # Each public accessor should delegate to its private counterpart.
            with patch_calls(self.atv, self.atv._audio_output_device) as audio_output_device:
                await self.atv.audio_output_device()
            assert audio_output_device.called
            with patch_calls(self.atv, self.atv._is_volume_muted) as is_volume_muted:
                await self.atv.is_volume_muted()
            assert is_volume_muted.called
            with patch_calls(self.atv, self.atv._volume) as volume:
                await self.atv.volume()
            assert volume.called
            with patch_calls(self.atv, self.atv._volume_level) as volume_level:
                await self.atv.volume_level()
            assert volume_level.called
    @awaiter
    async def test_set_volume_level(self):
        """Check that the ``set_volume_level`` method works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            new_volume_level = await self.atv.set_volume_level(0.5)
            self.assertIsNone(new_volume_level)
        with async_patchers.patch_shell("")[self.PATCH_KEY]:
            new_volume_level = await self.atv.set_volume_level(0.5)
            self.assertIsNone(new_volume_level)
        # 0.5 of the stream max (Max: 60 in STREAM_MUSIC_ON) -> "--set 30".
        with async_patchers.patch_shell(STREAM_MUSIC_ON)[self.PATCH_KEY]:
            new_volume_level = await self.atv.set_volume_level(0.5)
            self.assertEqual(new_volume_level, 0.5)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "media volume --show --stream 3 --set 30")
        with async_patchers.patch_shell("")[self.PATCH_KEY]:
            new_volume_level = await self.atv.set_volume_level(30.0 / 60)
            self.assertEqual(new_volume_level, 0.5)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "media volume --show --stream 3 --set 30")
        with async_patchers.patch_shell("")[self.PATCH_KEY]:
            new_volume_level = await self.atv.set_volume_level(22.0 / 60)
            self.assertEqual(new_volume_level, 22.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "media volume --show --stream 3 --set 22")
    @awaiter
    async def test_volume_up(self):
        """Check that the ``volume_up`` method works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_up()
            self.assertIsNone(new_volume_level)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
        with async_patchers.patch_shell("")[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_up()
            self.assertIsNone(new_volume_level)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
        # STREAM_MUSIC_ON: current hmdi_arc volume 22 of max 60 -> 23/60 after one step.
        with async_patchers.patch_shell(STREAM_MUSIC_ON)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_up()
            self.assertEqual(new_volume_level, 23.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
            new_volume_level = await self.atv.volume_up(23.0 / 60)
            self.assertEqual(new_volume_level, 24.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
        # STREAM_MUSIC_OFF: current speaker volume 20 of max 60 -> 21/60 after one step.
        with async_patchers.patch_shell(STREAM_MUSIC_OFF)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_up()
            self.assertEqual(new_volume_level, 21.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
            new_volume_level = await self.atv.volume_up(21.0 / 60)
            self.assertEqual(new_volume_level, 22.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 24")
    @awaiter
    async def test_volume_down(self):
        """Check that the ``volume_down`` method works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_down()
            self.assertIsNone(new_volume_level)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
        with async_patchers.patch_shell("")[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_down()
            self.assertIsNone(new_volume_level)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
        with async_patchers.patch_shell(STREAM_MUSIC_ON)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_down()
            self.assertEqual(new_volume_level, 21.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
            new_volume_level = await self.atv.volume_down(21.0 / 60)
            self.assertEqual(new_volume_level, 20.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
        with async_patchers.patch_shell(STREAM_MUSIC_OFF)[self.PATCH_KEY]:
            new_volume_level = await self.atv.volume_down()
            self.assertEqual(new_volume_level, 19.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
            new_volume_level = await self.atv.volume_down(19.0 / 60)
            self.assertEqual(new_volume_level, 18.0 / 60)
            self.assertEqual(getattr(self.atv._adb, self.ADB_ATTR).shell_cmd, "input keyevent 25")
    @awaiter
    async def test_get_properties(self):
        """Check that ``get_properties()`` works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            # lazy=True: only the screen/awake/wake-lock query should run.
            with patch_calls(
                self.atv, self.atv.screen_on_awake_wake_lock_size
            ) as screen_on_awake_wake_lock_size, patch_calls(
                self.atv, self.atv.current_app_media_session_state
            ) as current_app_media_session_state, patch_calls(
                self.atv, self.atv.stream_music_properties
            ) as stream_music_properties, patch_calls(
                self.atv, self.atv.running_apps
            ) as running_apps, patch_calls(
                self.atv, self.atv.get_hdmi_input
            ) as get_hdmi_input:
                await self.atv.get_properties(lazy=True)
            assert screen_on_awake_wake_lock_size.called
            assert not current_app_media_session_state.called
            assert not running_apps.called
            assert not get_hdmi_input.called
            # lazy=False + get_running_apps=True: every query should run.
            with patch_calls(
                self.atv, self.atv.screen_on_awake_wake_lock_size
            ) as screen_on_awake_wake_lock_size, patch_calls(
                self.atv, self.atv.current_app_media_session_state
            ) as current_app_media_session_state, patch_calls(
                self.atv, self.atv.stream_music_properties
            ) as stream_music_properties, patch_calls(
                self.atv, self.atv.running_apps
            ) as running_apps, patch_calls(
                self.atv, self.atv.get_hdmi_input
            ) as get_hdmi_input:
                await self.atv.get_properties(lazy=False, get_running_apps=True)
            assert screen_on_awake_wake_lock_size.called
            assert current_app_media_session_state.called
            assert running_apps.called
            assert get_hdmi_input.called
            # get_running_apps=False: everything except the running-apps query.
            with patch_calls(
                self.atv, self.atv.screen_on_awake_wake_lock_size
            ) as screen_on_awake_wake_lock_size, patch_calls(
                self.atv, self.atv.current_app_media_session_state
            ) as current_app_media_session_state, patch_calls(
                self.atv, self.atv.stream_music_properties
            ) as stream_music_properties, patch_calls(
                self.atv, self.atv.running_apps
            ) as running_apps, patch_calls(
                self.atv, self.atv.get_hdmi_input
            ) as get_hdmi_input:
                await self.atv.get_properties(lazy=False, get_running_apps=False)
            assert screen_on_awake_wake_lock_size.called
            assert current_app_media_session_state.called
            assert not running_apps.called
            assert get_hdmi_input.called
    @awaiter
    async def test_get_properties_dict(self):
        """Check that ``get_properties_dict()`` works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            with patch_calls(self.atv, self.atv.get_properties) as get_properties:
                await self.atv.get_properties_dict()
            assert get_properties.called
    @awaiter
    async def test_update(self):
        """Check that the ``update`` method works correctly."""
        with async_patchers.patch_shell(None)[self.PATCH_KEY]:
            with patch_calls(self.atv, self.atv._update) as patched:
                await self.atv.update()
            assert patched.called
class TestAndroidTVAsyncServer(TestAndroidTVAsyncPython):
    """Re-run the ``AndroidTVAsync`` tests against the ADB-server backend."""

    PATCH_KEY = "server"
    ADB_ATTR = "_adb_device"

    @awaiter
    async def setUp(self):
        """Create a connected ``AndroidTVAsync`` that goes through an ADB server."""
        connect_patch = async_patchers.patch_connect(True)[self.PATCH_KEY]
        shell_patch = async_patchers.patch_shell("")[self.PATCH_KEY]
        with connect_patch, shell_patch:
            self.atv = AndroidTVAsync("HOST", 5555, adb_server_ip="ADB_SERVER_IP")
            await self.atv.adb_connect()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 44.019737
| 120
| 0.654536
| 1,754
| 13,382
| 4.688712
| 0.080388
| 0.089373
| 0.057879
| 0.053745
| 0.849587
| 0.787695
| 0.779183
| 0.76909
| 0.754134
| 0.732369
| 0
| 0.023376
| 0.248767
| 13,382
| 303
| 121
| 44.165017
| 0.794688
| 0
| 0
| 0.645161
| 0
| 0
| 0.101539
| 0.002042
| 0
| 0
| 0
| 0
| 0.233871
| 1
| 0
| false
| 0
| 0.03629
| 0
| 0.060484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c7100a7c22c6e5a0ee4f81b7e0373e6940019ec1
| 264
|
py
|
Python
|
api/models.py
|
hemantpy/Flask-Okta
|
6f96ce4b90224aba72f8b853ce43e907c75e1360
|
[
"Apache-2.0"
] | null | null | null |
api/models.py
|
hemantpy/Flask-Okta
|
6f96ce4b90224aba72f8b853ce43e907c75e1360
|
[
"Apache-2.0"
] | null | null | null |
api/models.py
|
hemantpy/Flask-Okta
|
6f96ce4b90224aba72f8b853ce43e907c75e1360
|
[
"Apache-2.0"
] | null | null | null |
class Model:
    """Abstract base class for serializable API models.

    Subclasses must override :meth:`to_dict`.
    """

    def to_dict(self):
        """Return a dict representation of this model.

        Raises:
            NotImplementedError: always; subclasses must override this method.
        """
        # Bug fix: the original `return NotImplementedError` handed back the
        # exception *class* instead of signalling "not implemented".
        raise NotImplementedError
class User(Model):
    """User model; currently carries no serializable fields."""

    def to_dict(self):
        """Return the dict representation of a user (currently empty)."""
        return dict()
class UserID(Model):
    """User-ID model; currently carries no serializable fields."""

    def to_dict(self):
        """Return the dict representation of a user ID (currently empty)."""
        return dict()
class UserAuth(Model):
    """User-authentication model; currently carries no serializable fields."""

    def to_dict(self):
        """Return the dict representation of user auth data (currently empty)."""
        return dict()
| 13.2
| 34
| 0.594697
| 32
| 264
| 4.78125
| 0.34375
| 0.20915
| 0.261438
| 0.366013
| 0.69281
| 0.69281
| 0.379085
| 0
| 0
| 0
| 0
| 0
| 0.299242
| 264
| 19
| 35
| 13.894737
| 0.827027
| 0
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.