Dataset schema:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Example record (metadata fields):

| Column | Value |
|---|---|
| hexsha | 82b81b1430ce28d061a3efa8bbb5babbc0a2ec76 |
| size | 367,187 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/unit/gapic/container_v1/test_cluster_manager.py |
| max_stars_repo_name | donmccasland/python-container |
| max_stars_repo_head_hexsha | 5053428f63792fa822ae28f34c0e35150794d153 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | tests/unit/gapic/container_v1/test_cluster_manager.py |
| max_issues_repo_name | donmccasland/python-container |
| max_issues_repo_head_hexsha | 5053428f63792fa822ae28f34c0e35150794d153 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | tests/unit/gapic/container_v1/test_cluster_manager.py |
| max_forks_repo_name | donmccasland/python-container |
| max_forks_repo_head_hexsha | 5053428f63792fa822ae28f34c0e35150794d153 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
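A minimal sketch, not part of the record, of how rows with this schema might be filtered on their quality signals. It assumes the records are materialized as a Parquet shard; the file name `code_shard.parquet` and the thresholds are hypothetical:

```python
import pandas as pd

# Hypothetical shard path; substitute the dataset's actual storage layout.
df = pd.read_parquet("code_shard.parquet")

# Keep Python files whose signals suggest hand-written (non-duplicated,
# at least lightly commented) code, using columns from the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.3)
    & (df["qsc_code_frac_chars_comments_quality_signal"] > 0.01)
)
print(f"kept {mask.sum()} of {len(df)} rows")
```

The record's `content` field holds the file reproduced in full below.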
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.container_v1.services.cluster_manager import ClusterManagerAsyncClient
from google.cloud.container_v1.services.cluster_manager import ClusterManagerClient
from google.cloud.container_v1.services.cluster_manager import pagers
from google.cloud.container_v1.services.cluster_manager import transports
from google.cloud.container_v1.services.cluster_manager.transports.base import (
_API_CORE_VERSION,
)
from google.cloud.container_v1.services.cluster_manager.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.container_v1.types import cluster_service
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
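# Illustrative usage of the markers defined above (a sketch, not code from
# this file): pytest skips a decorated test whenever the installed dependency
# version falls outside the required range, e.g.:
#
#     @requires_google_auth_gte_1_25_0
#     def test_behavior_added_in_new_google_auth():
#         ...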
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If the default endpoint is localhost, the default mTLS endpoint will be the same.
# This function modifies the default endpoint so the client can produce a different
# mTLS endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ClusterManagerClient._get_default_mtls_endpoint(None) is None
assert (
ClusterManagerClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
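# As the assertions above demonstrate, _get_default_mtls_endpoint derives the
# mTLS endpoint by inserting "mtls" into *.googleapis.com hostnames
# (example.googleapis.com -> example.mtls.googleapis.com, and likewise for
# sandbox hostnames) and returns non-googleapis.com hostnames unchanged.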
@pytest.mark.parametrize(
"client_class", [ClusterManagerClient, ClusterManagerAsyncClient,]
)
def test_cluster_manager_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "container.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [ClusterManagerClient, ClusterManagerAsyncClient,]
)
def test_cluster_manager_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_client_get_transport_class():
transport = ClusterManagerClient.get_transport_class()
available_transports = [
transports.ClusterManagerGrpcTransport,
]
assert transport in available_transports
transport = ClusterManagerClient.get_transport_class("grpc")
assert transport == transports.ClusterManagerGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"),
(
ClusterManagerAsyncClient,
transports.ClusterManagerGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ClusterManagerClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ClusterManagerClient),
)
@mock.patch.object(
ClusterManagerAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ClusterManagerAsyncClient),
)
def test_cluster_manager_client_client_options(
client_class, transport_class, transport_name
):
    # Check that if a transport instance is provided, we won't create a new one.
with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
    # Check that if the transport is provided as a string, we will create a new one.
with mock.patch.object(ClusterManagerClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"),
(
ClusterManagerAsyncClient,
transports.ClusterManagerGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"),
(
ClusterManagerAsyncClient,
transports.ClusterManagerGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ClusterManagerClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ClusterManagerClient),
)
@mock.patch.object(
ClusterManagerAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ClusterManagerAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cluster_manager_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint autoswitch behavior: the endpoint is switched to the
    # default mTLS endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    # client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
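    # For reference (summarizing the google.api_core client_options behavior
    # exercised in the tests above):
    #   GOOGLE_API_USE_CLIENT_CERTIFICATE must be "true" or "false"; any other
    #   value raises ValueError.
    #   GOOGLE_API_USE_MTLS_ENDPOINT must be "never", "auto", or "always"; any
    #   other value raises MutualTLSChannelError. "auto" selects the mTLS
    #   endpoint only when a client certificate is available.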
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"),
(
ClusterManagerAsyncClient,
transports.ClusterManagerGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_cluster_manager_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"),
(
ClusterManagerAsyncClient,
transports.ClusterManagerGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_cluster_manager_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_cluster_manager_client_client_options_from_dict():
with mock.patch(
"google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = ClusterManagerClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
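# Note: client_options accepts either a client_options.ClientOptions instance
# (as in the earlier tests) or a plain dict (as in the test above); the client
# treats the two forms equivalently.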
def test_list_clusters(
transport: str = "grpc", request_type=cluster_service.ListClustersRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse(
missing_zones=["missing_zones_value"],
)
response = client.list_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListClustersResponse)
assert response.missing_zones == ["missing_zones_value"]
def test_list_clusters_from_dict():
test_list_clusters(request_type=dict)
def test_list_clusters_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
client.list_clusters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListClustersRequest()
@pytest.mark.asyncio
async def test_list_clusters_async(
transport: str = "grpc_asyncio", request_type=cluster_service.ListClustersRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse(missing_zones=["missing_zones_value"],)
)
response = await client.list_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListClustersResponse)
assert response.missing_zones == ["missing_zones_value"]
@pytest.mark.asyncio
async def test_list_clusters_async_from_dict():
await test_list_clusters_async(request_type=dict)
def test_list_clusters_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = cluster_service.ListClustersResponse()
client.list_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_clusters_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse()
)
await client.list_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_clusters_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_clusters(
project_id="project_id_value", zone="zone_value", parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].parent == "parent_value"
def test_list_clusters_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_clusters(
cluster_service.ListClustersRequest(),
project_id="project_id_value",
zone="zone_value",
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_clusters_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.ListClustersResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_clusters(
project_id="project_id_value", zone="zone_value", parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_clusters_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_clusters(
cluster_service.ListClustersRequest(),
project_id="project_id_value",
zone="zone_value",
parent="parent_value",
)
def test_get_cluster(
transport: str = "grpc", request_type=cluster_service.GetClusterRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster(
name="name_value",
description="description_value",
initial_node_count=1911,
logging_service="logging_service_value",
monitoring_service="monitoring_service_value",
network="network_value",
cluster_ipv4_cidr="cluster_ipv4_cidr_value",
subnetwork="subnetwork_value",
locations=["locations_value"],
enable_kubernetes_alpha=True,
label_fingerprint="label_fingerprint_value",
self_link="self_link_value",
zone="zone_value",
endpoint="endpoint_value",
initial_cluster_version="initial_cluster_version_value",
current_master_version="current_master_version_value",
current_node_version="current_node_version_value",
create_time="create_time_value",
status=cluster_service.Cluster.Status.PROVISIONING,
status_message="status_message_value",
node_ipv4_cidr_size=1955,
services_ipv4_cidr="services_ipv4_cidr_value",
instance_group_urls=["instance_group_urls_value"],
current_node_count=1936,
expire_time="expire_time_value",
location="location_value",
enable_tpu=True,
tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value",
)
response = client.get_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Cluster)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.initial_node_count == 1911
assert response.logging_service == "logging_service_value"
assert response.monitoring_service == "monitoring_service_value"
assert response.network == "network_value"
assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value"
assert response.subnetwork == "subnetwork_value"
assert response.locations == ["locations_value"]
assert response.enable_kubernetes_alpha is True
assert response.label_fingerprint == "label_fingerprint_value"
assert response.self_link == "self_link_value"
assert response.zone == "zone_value"
assert response.endpoint == "endpoint_value"
assert response.initial_cluster_version == "initial_cluster_version_value"
assert response.current_master_version == "current_master_version_value"
assert response.current_node_version == "current_node_version_value"
assert response.create_time == "create_time_value"
assert response.status == cluster_service.Cluster.Status.PROVISIONING
assert response.status_message == "status_message_value"
assert response.node_ipv4_cidr_size == 1955
assert response.services_ipv4_cidr == "services_ipv4_cidr_value"
assert response.instance_group_urls == ["instance_group_urls_value"]
assert response.current_node_count == 1936
assert response.expire_time == "expire_time_value"
assert response.location == "location_value"
assert response.enable_tpu is True
assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value"
def test_get_cluster_from_dict():
test_get_cluster(request_type=dict)
def test_get_cluster_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
client.get_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetClusterRequest()
@pytest.mark.asyncio
async def test_get_cluster_async(
transport: str = "grpc_asyncio", request_type=cluster_service.GetClusterRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster(
name="name_value",
description="description_value",
initial_node_count=1911,
logging_service="logging_service_value",
monitoring_service="monitoring_service_value",
network="network_value",
cluster_ipv4_cidr="cluster_ipv4_cidr_value",
subnetwork="subnetwork_value",
locations=["locations_value"],
enable_kubernetes_alpha=True,
label_fingerprint="label_fingerprint_value",
self_link="self_link_value",
zone="zone_value",
endpoint="endpoint_value",
initial_cluster_version="initial_cluster_version_value",
current_master_version="current_master_version_value",
current_node_version="current_node_version_value",
create_time="create_time_value",
status=cluster_service.Cluster.Status.PROVISIONING,
status_message="status_message_value",
node_ipv4_cidr_size=1955,
services_ipv4_cidr="services_ipv4_cidr_value",
instance_group_urls=["instance_group_urls_value"],
current_node_count=1936,
expire_time="expire_time_value",
location="location_value",
enable_tpu=True,
tpu_ipv4_cidr_block="tpu_ipv4_cidr_block_value",
)
)
response = await client.get_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Cluster)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.initial_node_count == 1911
assert response.logging_service == "logging_service_value"
assert response.monitoring_service == "monitoring_service_value"
assert response.network == "network_value"
assert response.cluster_ipv4_cidr == "cluster_ipv4_cidr_value"
assert response.subnetwork == "subnetwork_value"
assert response.locations == ["locations_value"]
assert response.enable_kubernetes_alpha is True
assert response.label_fingerprint == "label_fingerprint_value"
assert response.self_link == "self_link_value"
assert response.zone == "zone_value"
assert response.endpoint == "endpoint_value"
assert response.initial_cluster_version == "initial_cluster_version_value"
assert response.current_master_version == "current_master_version_value"
assert response.current_node_version == "current_node_version_value"
assert response.create_time == "create_time_value"
assert response.status == cluster_service.Cluster.Status.PROVISIONING
assert response.status_message == "status_message_value"
assert response.node_ipv4_cidr_size == 1955
assert response.services_ipv4_cidr == "services_ipv4_cidr_value"
assert response.instance_group_urls == ["instance_group_urls_value"]
assert response.current_node_count == 1936
assert response.expire_time == "expire_time_value"
assert response.location == "location_value"
assert response.enable_tpu is True
assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value"
@pytest.mark.asyncio
async def test_get_cluster_async_from_dict():
await test_get_cluster_async(request_type=dict)
def test_get_cluster_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = cluster_service.Cluster()
client.get_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_cluster_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster()
)
await client.get_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_cluster_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
def test_get_cluster_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_cluster(
cluster_service.GetClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_get_cluster_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Cluster()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_cluster_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_cluster(
cluster_service.GetClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
def test_create_cluster(
transport: str = "grpc", request_type=cluster_service.CreateClusterRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.create_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_create_cluster_from_dict():
test_create_cluster(request_type=dict)
def test_create_cluster_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
client.create_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateClusterRequest()
@pytest.mark.asyncio
async def test_create_cluster_async(
transport: str = "grpc_asyncio", request_type=cluster_service.CreateClusterRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.create_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_create_cluster_async_from_dict():
await test_create_cluster_async(request_type=dict)
def test_create_cluster_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CreateClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_cluster_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CreateClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.create_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_cluster_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_cluster(
project_id="project_id_value",
zone="zone_value",
cluster=cluster_service.Cluster(name="name_value"),
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster == cluster_service.Cluster(name="name_value")
assert args[0].parent == "parent_value"
def test_create_cluster_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_cluster(
cluster_service.CreateClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster=cluster_service.Cluster(name="name_value"),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_create_cluster_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_cluster(
project_id="project_id_value",
zone="zone_value",
cluster=cluster_service.Cluster(name="name_value"),
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster == cluster_service.Cluster(name="name_value")
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_create_cluster_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_cluster(
cluster_service.CreateClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster=cluster_service.Cluster(name="name_value"),
parent="parent_value",
)
def test_update_cluster(
transport: str = "grpc", request_type=cluster_service.UpdateClusterRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.update_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_update_cluster_from_dict():
test_update_cluster(request_type=dict)
def test_update_cluster_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
client.update_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateClusterRequest()
@pytest.mark.asyncio
async def test_update_cluster_async(
transport: str = "grpc_asyncio", request_type=cluster_service.UpdateClusterRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.update_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_update_cluster_async_from_dict():
await test_update_cluster_async(request_type=dict)
def test_update_cluster_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_cluster_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.update_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_update_cluster_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
update=cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].update == cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
)
assert args[0].name == "name_value"
def test_update_cluster_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_cluster(
cluster_service.UpdateClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
update=cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
),
name="name_value",
)
@pytest.mark.asyncio
async def test_update_cluster_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
update=cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].update == cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
)
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_update_cluster_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_cluster(
cluster_service.UpdateClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
update=cluster_service.ClusterUpdate(
desired_node_version="desired_node_version_value"
),
name="name_value",
)
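
# ---------------------------------------------------------------------------
# Hedged sketch, not part of the generated suite: the fully-populated
# Operation literal above is repeated verbatim for every RPC in this file. A
# module-level factory such as this hypothetical `_make_operation` (the name
# is an assumption) would remove that duplication without changing any
# assertion.
def _make_operation():
    # Mirrors the field values asserted throughout these tests.
    return cluster_service.Operation(
        name="name_value",
        zone="zone_value",
        operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
        status=cluster_service.Operation.Status.PENDING,
        detail="detail_value",
        status_message="status_message_value",
        self_link="self_link_value",
        target_link="target_link_value",
        location="location_value",
        start_time="start_time_value",
        end_time="end_time_value",
    )
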
def test_update_node_pool(
transport: str = "grpc", request_type=cluster_service.UpdateNodePoolRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.update_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_update_node_pool_from_dict():
test_update_node_pool(request_type=dict)
def test_update_node_pool_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
client.update_node_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateNodePoolRequest()
@pytest.mark.asyncio
async def test_update_node_pool_async(
transport: str = "grpc_asyncio", request_type=cluster_service.UpdateNodePoolRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.update_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_update_node_pool_async_from_dict():
await test_update_node_pool_async(request_type=dict)
def test_update_node_pool_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_node_pool_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.update_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_node_pool_autoscaling(
transport: str = "grpc", request_type=cluster_service.SetNodePoolAutoscalingRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_node_pool_autoscaling(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_node_pool_autoscaling_from_dict():
test_set_node_pool_autoscaling(request_type=dict)
def test_set_node_pool_autoscaling_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
client.set_node_pool_autoscaling()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
@pytest.mark.asyncio
async def test_set_node_pool_autoscaling_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetNodePoolAutoscalingRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_node_pool_autoscaling(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_node_pool_autoscaling_async_from_dict():
await test_set_node_pool_autoscaling_async(request_type=dict)
def test_set_node_pool_autoscaling_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolAutoscalingRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_node_pool_autoscaling(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_node_pool_autoscaling_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolAutoscalingRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_node_pool_autoscaling(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_logging_service(
transport: str = "grpc", request_type=cluster_service.SetLoggingServiceRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_logging_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_logging_service_from_dict():
test_set_logging_service(request_type=dict)
def test_set_logging_service_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
client.set_logging_service()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLoggingServiceRequest()
@pytest.mark.asyncio
async def test_set_logging_service_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetLoggingServiceRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_logging_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_logging_service_async_from_dict():
await test_set_logging_service_async(request_type=dict)
def test_set_logging_service_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLoggingServiceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_logging_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_logging_service_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLoggingServiceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_logging_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_logging_service_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_logging_service(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
logging_service="logging_service_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].logging_service == "logging_service_value"
assert args[0].name == "name_value"
def test_set_logging_service_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_logging_service(
cluster_service.SetLoggingServiceRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
logging_service="logging_service_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_set_logging_service_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_logging_service(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
logging_service="logging_service_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].logging_service == "logging_service_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_logging_service_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_logging_service(
cluster_service.SetLoggingServiceRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
logging_service="logging_service_value",
name="name_value",
)
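
# Hedged usage sketch, not part of the test suite: the flattened calling form
# exercised above maps to production code along these lines. The project,
# zone, and cluster values are placeholder assumptions, and real credentials
# (e.g. Application Default Credentials) are assumed instead of
# AnonymousCredentials.
def _example_set_logging_service():
    client = ClusterManagerClient()
    return client.set_logging_service(
        project_id="my-project",
        zone="us-central1-a",
        cluster_id="my-cluster",
        logging_service="logging.googleapis.com",
    )
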
def test_set_monitoring_service(
transport: str = "grpc", request_type=cluster_service.SetMonitoringServiceRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_monitoring_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_monitoring_service_from_dict():
test_set_monitoring_service(request_type=dict)
def test_set_monitoring_service_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
client.set_monitoring_service()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMonitoringServiceRequest()
@pytest.mark.asyncio
async def test_set_monitoring_service_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetMonitoringServiceRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_monitoring_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_monitoring_service_async_from_dict():
await test_set_monitoring_service_async(request_type=dict)
def test_set_monitoring_service_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMonitoringServiceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_monitoring_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_monitoring_service_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMonitoringServiceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_monitoring_service(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_monitoring_service_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_monitoring_service(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
monitoring_service="monitoring_service_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].monitoring_service == "monitoring_service_value"
assert args[0].name == "name_value"
def test_set_monitoring_service_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_monitoring_service(
cluster_service.SetMonitoringServiceRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
monitoring_service="monitoring_service_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_set_monitoring_service_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_monitoring_service(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
monitoring_service="monitoring_service_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].monitoring_service == "monitoring_service_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_monitoring_service_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_monitoring_service(
cluster_service.SetMonitoringServiceRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
monitoring_service="monitoring_service_value",
name="name_value",
)
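
# Hedged sketch, not part of the generated suite: the mock.patch.object
# pattern repeated above can be factored into a context manager. The helper
# name `_mock_transport_call` and its signature are assumptions.
import contextlib

@contextlib.contextmanager
def _mock_transport_call(client, method_name, return_value):
    method = getattr(client.transport, method_name)
    with mock.patch.object(type(method), "__call__") as call:
        # Designate the return value exactly as the tests above do.
        call.return_value = return_value
        yield call

# Example usage inside a test:
#     with _mock_transport_call(client, "set_monitoring_service",
#                               cluster_service.Operation()) as call:
#         client.set_monitoring_service(request)
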
def test_set_addons_config(
transport: str = "grpc", request_type=cluster_service.SetAddonsConfigRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_addons_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_addons_config_from_dict():
test_set_addons_config(request_type=dict)
def test_set_addons_config_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
client.set_addons_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetAddonsConfigRequest()
@pytest.mark.asyncio
async def test_set_addons_config_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetAddonsConfigRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_addons_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_addons_config_async_from_dict():
await test_set_addons_config_async(request_type=dict)
def test_set_addons_config_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetAddonsConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_addons_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_addons_config_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetAddonsConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_addons_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_addons_config_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_addons_config(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
addons_config=cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].addons_config == cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
)
assert args[0].name == "name_value"
def test_set_addons_config_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_addons_config(
cluster_service.SetAddonsConfigRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
addons_config=cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
),
name="name_value",
)
@pytest.mark.asyncio
async def test_set_addons_config_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_addons_config(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
addons_config=cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].addons_config == cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
)
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_addons_config_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_addons_config(
cluster_service.SetAddonsConfigRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
addons_config=cluster_service.AddonsConfig(
http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)
),
name="name_value",
)
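
# Hedged sketch, not part of the test suite: the AddonsConfig message built
# inline above can also be assembled field by field, which scales better when
# several add-ons are toggled. The function name is an assumption.
def _example_addons_config():
    addons = cluster_service.AddonsConfig()
    addons.http_load_balancing = cluster_service.HttpLoadBalancing(disabled=True)
    return addons
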
def test_set_locations(
transport: str = "grpc", request_type=cluster_service.SetLocationsRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_locations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_locations_from_dict():
test_set_locations(request_type=dict)
def test_set_locations_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
client.set_locations()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLocationsRequest()
@pytest.mark.asyncio
async def test_set_locations_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetLocationsRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_locations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_locations_async_from_dict():
await test_set_locations_async(request_type=dict)
def test_set_locations_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLocationsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_locations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_locations_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLocationsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_locations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_locations_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_locations(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
locations=["locations_value"],
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].locations == ["locations_value"]
assert args[0].name == "name_value"
def test_set_locations_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_locations(
cluster_service.SetLocationsRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
locations=["locations_value"],
name="name_value",
)
@pytest.mark.asyncio
async def test_set_locations_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_locations(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
locations=["locations_value"],
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].locations == ["locations_value"]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_locations_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_locations(
cluster_service.SetLocationsRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
locations=["locations_value"],
name="name_value",
)
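
# Hedged usage sketch, not part of the test suite: the async client mirrors
# the flattened form exercised above. Identifiers are placeholder assumptions
# and real credentials are assumed.
async def _example_set_locations_async():
    client = ClusterManagerAsyncClient()
    return await client.set_locations(
        project_id="my-project",
        zone="us-central1-a",
        cluster_id="my-cluster",
        locations=["us-central1-a", "us-central1-b"],
    )
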
def test_update_master(
transport: str = "grpc", request_type=cluster_service.UpdateMasterRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.update_master(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


def test_update_master_from_dict():
    test_update_master(request_type=dict)


def test_update_master_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
client.update_master()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.UpdateMasterRequest()


@pytest.mark.asyncio
async def test_update_master_async(
transport: str = "grpc_asyncio", request_type=cluster_service.UpdateMasterRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.update_master(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


@pytest.mark.asyncio
async def test_update_master_async_from_dict():
    await test_update_master_async(request_type=dict)


def test_update_master_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateMasterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_master(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_update_master_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.UpdateMasterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.update_master(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_update_master_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_master(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
master_version="master_version_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].master_version == "master_version_value"
        assert args[0].name == "name_value"


def test_update_master_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_master(
cluster_service.UpdateMasterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
master_version="master_version_value",
name="name_value",
        )


@pytest.mark.asyncio
async def test_update_master_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_master(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
master_version="master_version_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].master_version == "master_version_value"
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_update_master_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_master(
cluster_service.UpdateMasterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
master_version="master_version_value",
name="name_value",
)
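
# The tests below exercise set_master_auth. No flattened signature is
# generated for this method (note the absence of *_flattened tests), so
# only request-object calls and routing headers are covered. A hypothetical
# request-object call:
#
#     request = cluster_service.SetMasterAuthRequest(
#         name="projects/my-proj/locations/us-central1/clusters/my-cluster",
#     )
#     operation = client.set_master_auth(request)
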
def test_set_master_auth(
transport: str = "grpc", request_type=cluster_service.SetMasterAuthRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_master_auth(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


def test_set_master_auth_from_dict():
    test_set_master_auth(request_type=dict)


def test_set_master_auth_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
client.set_master_auth()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.SetMasterAuthRequest()


@pytest.mark.asyncio
async def test_set_master_auth_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetMasterAuthRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_master_auth(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


@pytest.mark.asyncio
async def test_set_master_auth_async_from_dict():
    await test_set_master_auth_async(request_type=dict)


def test_set_master_auth_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMasterAuthRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_master_auth(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_set_master_auth_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMasterAuthRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_master_auth(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
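
# The tests below exercise delete_cluster. Its flattened signature accepts
# either the legacy project_id/zone/cluster_id triple or the newer name
# field, as the assertions verify. Hypothetical call:
#
#     client.delete_cluster(
#         name="projects/my-proj/locations/us-central1/clusters/my-cluster",
#     )
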
def test_delete_cluster(
transport: str = "grpc", request_type=cluster_service.DeleteClusterRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.delete_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


def test_delete_cluster_from_dict():
    test_delete_cluster(request_type=dict)


def test_delete_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
client.delete_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.DeleteClusterRequest()


@pytest.mark.asyncio
async def test_delete_cluster_async(
transport: str = "grpc_asyncio", request_type=cluster_service.DeleteClusterRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.delete_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


@pytest.mark.asyncio
async def test_delete_cluster_async_from_dict():
    await test_delete_cluster_async(request_type=dict)


def test_delete_cluster_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.DeleteClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_delete_cluster_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.DeleteClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.delete_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_delete_cluster_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
        assert args[0].name == "name_value"


def test_delete_cluster_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_cluster(
cluster_service.DeleteClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
        )


@pytest.mark.asyncio
async def test_delete_cluster_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_cluster(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_delete_cluster_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_cluster(
cluster_service.DeleteClusterRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
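
# The tests below exercise list_operations, which returns a
# ListOperationsResponse and routes on the parent field rather than name.
# A hypothetical flattened call using the legacy fields:
#
#     response = client.list_operations(
#         project_id="my-proj", zone="us-central1-a",
#     )
#     print(response.missing_zones)
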
def test_list_operations(
transport: str = "grpc", request_type=cluster_service.ListOperationsRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse(
missing_zones=["missing_zones_value"],
)
response = client.list_operations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListOperationsResponse)
    assert response.missing_zones == ["missing_zones_value"]


def test_list_operations_from_dict():
    test_list_operations(request_type=dict)


def test_list_operations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
client.list_operations()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.ListOperationsRequest()


@pytest.mark.asyncio
async def test_list_operations_async(
transport: str = "grpc_asyncio", request_type=cluster_service.ListOperationsRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse(
missing_zones=["missing_zones_value"],
)
)
response = await client.list_operations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListOperationsResponse)
    assert response.missing_zones == ["missing_zones_value"]


@pytest.mark.asyncio
async def test_list_operations_async_from_dict():
    await test_list_operations_async(request_type=dict)


def test_list_operations_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListOperationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = cluster_service.ListOperationsResponse()
client.list_operations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_list_operations_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListOperationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse()
)
await client.list_operations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


def test_list_operations_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_operations(
project_id="project_id_value", zone="zone_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
        assert args[0].zone == "zone_value"


def test_list_operations_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_operations(
cluster_service.ListOperationsRequest(),
project_id="project_id_value",
zone="zone_value",
        )


@pytest.mark.asyncio
async def test_list_operations_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.ListOperationsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_operations(
project_id="project_id_value", zone="zone_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
        assert args[0].zone == "zone_value"


@pytest.mark.asyncio
async def test_list_operations_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_operations(
cluster_service.ListOperationsRequest(),
project_id="project_id_value",
zone="zone_value",
)
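
# The tests below exercise get_operation, which echoes back the full
# Operation message from the mocked stub. Hypothetical call:
#
#     op = client.get_operation(
#         name="projects/my-proj/locations/us-central1/operations/op-123",
#     )
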
def test_get_operation(
transport: str = "grpc", request_type=cluster_service.GetOperationRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.get_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


def test_get_operation_from_dict():
    test_get_operation(request_type=dict)


def test_get_operation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
client.get_operation()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.GetOperationRequest()


@pytest.mark.asyncio
async def test_get_operation_async(
transport: str = "grpc_asyncio", request_type=cluster_service.GetOperationRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.get_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
    assert response.end_time == "end_time_value"


@pytest.mark.asyncio
async def test_get_operation_async_from_dict():
    await test_get_operation_async(request_type=dict)


def test_get_operation_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetOperationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = cluster_service.Operation()
client.get_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_operation_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetOperationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.get_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_get_operation_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_operation(
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].operation_id == "operation_id_value"
        assert args[0].name == "name_value"


def test_get_operation_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_operation(
cluster_service.GetOperationRequest(),
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
        )


@pytest.mark.asyncio
async def test_get_operation_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_operation(
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].operation_id == "operation_id_value"
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_get_operation_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_operation(
cluster_service.GetOperationRequest(),
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
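
# The tests below exercise cancel_operation. The RPC returns
# google.protobuf.Empty, so the client method returns None. Hypothetical
# call:
#
#     client.cancel_operation(
#         name="projects/my-proj/locations/us-central1/operations/op-123",
#     )
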
def test_cancel_operation(
transport: str = "grpc", request_type=cluster_service.CancelOperationRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CancelOperationRequest()
# Establish that the response is the type that we expect.
    assert response is None


def test_cancel_operation_from_dict():
    test_cancel_operation(request_type=dict)


def test_cancel_operation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
client.cancel_operation()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.CancelOperationRequest()


@pytest.mark.asyncio
async def test_cancel_operation_async(
transport: str = "grpc_asyncio", request_type=cluster_service.CancelOperationRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CancelOperationRequest()
# Establish that the response is the type that we expect.
    assert response is None


@pytest.mark.asyncio
async def test_cancel_operation_async_from_dict():
    await test_cancel_operation_async(request_type=dict)


def test_cancel_operation_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CancelOperationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = None
client.cancel_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_cancel_operation_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CancelOperationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_operation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_cancel_operation_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_operation(
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].operation_id == "operation_id_value"
        assert args[0].name == "name_value"


def test_cancel_operation_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_operation(
cluster_service.CancelOperationRequest(),
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
        )


@pytest.mark.asyncio
async def test_cancel_operation_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_operation(
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].operation_id == "operation_id_value"
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_cancel_operation_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_operation(
cluster_service.CancelOperationRequest(),
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
name="name_value",
)
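
# The tests below exercise get_server_config, which returns a ServerConfig
# listing valid cluster/node versions and image types. Hypothetical call
# (the serverConfig resource name format is assumed here):
#
#     config = client.get_server_config(
#         name="projects/my-proj/locations/us-central1/serverConfig",
#     )
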
def test_get_server_config(
transport: str = "grpc", request_type=cluster_service.GetServerConfigRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig(
default_cluster_version="default_cluster_version_value",
valid_node_versions=["valid_node_versions_value"],
default_image_type="default_image_type_value",
valid_image_types=["valid_image_types_value"],
valid_master_versions=["valid_master_versions_value"],
)
response = client.get_server_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ServerConfig)
assert response.default_cluster_version == "default_cluster_version_value"
assert response.valid_node_versions == ["valid_node_versions_value"]
assert response.default_image_type == "default_image_type_value"
assert response.valid_image_types == ["valid_image_types_value"]
    assert response.valid_master_versions == ["valid_master_versions_value"]


def test_get_server_config_from_dict():
    test_get_server_config(request_type=dict)


def test_get_server_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
client.get_server_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
        assert args[0] == cluster_service.GetServerConfigRequest()


@pytest.mark.asyncio
async def test_get_server_config_async(
transport: str = "grpc_asyncio", request_type=cluster_service.GetServerConfigRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ServerConfig(
default_cluster_version="default_cluster_version_value",
valid_node_versions=["valid_node_versions_value"],
default_image_type="default_image_type_value",
valid_image_types=["valid_image_types_value"],
valid_master_versions=["valid_master_versions_value"],
)
)
response = await client.get_server_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ServerConfig)
assert response.default_cluster_version == "default_cluster_version_value"
assert response.valid_node_versions == ["valid_node_versions_value"]
assert response.default_image_type == "default_image_type_value"
assert response.valid_image_types == ["valid_image_types_value"]
    assert response.valid_master_versions == ["valid_master_versions_value"]


@pytest.mark.asyncio
async def test_get_server_config_async_from_dict():
    await test_get_server_config_async(request_type=dict)


def test_get_server_config_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetServerConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = cluster_service.ServerConfig()
client.get_server_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_server_config_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetServerConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ServerConfig()
)
await client.get_server_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_get_server_config_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_server_config(
project_id="project_id_value", zone="zone_value", name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
        assert args[0].name == "name_value"


def test_get_server_config_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_server_config(
cluster_service.GetServerConfigRequest(),
project_id="project_id_value",
zone="zone_value",
name="name_value",
        )


@pytest.mark.asyncio
async def test_get_server_config_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.ServerConfig()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_server_config(
project_id="project_id_value", zone="zone_value", name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_get_server_config_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_server_config(
cluster_service.GetServerConfigRequest(),
project_id="project_id_value",
zone="zone_value",
name="name_value",
)
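
# The tests below exercise get_json_web_keys, which serves the cluster's
# JSON Web Key Set. As with set_master_auth, no flattened signature is
# generated; routing uses the parent field. Hypothetical call:
#
#     keys = client.get_json_web_keys(
#         cluster_service.GetJSONWebKeysRequest(
#             parent="projects/my-proj/locations/us-central1/clusters/my-cluster",
#         )
#     )
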
def test_get_json_web_keys(
transport: str = "grpc", request_type=cluster_service.GetJSONWebKeysRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_json_web_keys), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.GetJSONWebKeysResponse()
response = client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetJSONWebKeysRequest()
# Establish that the response is the type that we expect.
    assert isinstance(response, cluster_service.GetJSONWebKeysResponse)


def test_get_json_web_keys_from_dict():
    test_get_json_web_keys(request_type=dict)


def test_get_json_web_keys_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_json_web_keys), "__call__"
) as call:
client.get_json_web_keys()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetJSONWebKeysRequest()
@pytest.mark.asyncio
async def test_get_json_web_keys_async(
transport: str = "grpc_asyncio", request_type=cluster_service.GetJSONWebKeysRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_json_web_keys), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.GetJSONWebKeysResponse()
)
response = await client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetJSONWebKeysRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
@pytest.mark.asyncio
async def test_get_json_web_keys_async_from_dict():
await test_get_json_web_keys_async(request_type=dict)
def test_get_json_web_keys_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetJSONWebKeysRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_json_web_keys), "__call__"
) as call:
call.return_value = cluster_service.GetJSONWebKeysResponse()
client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_json_web_keys_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetJSONWebKeysRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_json_web_keys), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.GetJSONWebKeysResponse()
)
await client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_node_pools(
transport: str = "grpc", request_type=cluster_service.ListNodePoolsRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
response = client.list_node_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListNodePoolsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListNodePoolsResponse)
def test_list_node_pools_from_dict():
test_list_node_pools(request_type=dict)
def test_list_node_pools_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
client.list_node_pools()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListNodePoolsRequest()
@pytest.mark.asyncio
async def test_list_node_pools_async(
transport: str = "grpc_asyncio", request_type=cluster_service.ListNodePoolsRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListNodePoolsResponse()
)
response = await client.list_node_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListNodePoolsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListNodePoolsResponse)
@pytest.mark.asyncio
async def test_list_node_pools_async_from_dict():
await test_list_node_pools_async(request_type=dict)
def test_list_node_pools_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListNodePoolsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = cluster_service.ListNodePoolsResponse()
client.list_node_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_node_pools_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListNodePoolsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListNodePoolsResponse()
)
await client.list_node_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_node_pools_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_node_pools(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].parent == "parent_value"
def test_list_node_pools_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_node_pools(
cluster_service.ListNodePoolsRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_node_pools_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.ListNodePoolsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_node_pools(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_node_pools_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_node_pools(
cluster_service.ListNodePoolsRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
parent="parent_value",
)
def test_get_node_pool(
transport: str = "grpc", request_type=cluster_service.GetNodePoolRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool(
name="name_value",
initial_node_count=1911,
locations=["locations_value"],
self_link="self_link_value",
version="version_value",
instance_group_urls=["instance_group_urls_value"],
status=cluster_service.NodePool.Status.PROVISIONING,
status_message="status_message_value",
pod_ipv4_cidr_size=1856,
)
response = client.get_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.NodePool)
assert response.name == "name_value"
assert response.initial_node_count == 1911
assert response.locations == ["locations_value"]
assert response.self_link == "self_link_value"
assert response.version == "version_value"
assert response.instance_group_urls == ["instance_group_urls_value"]
assert response.status == cluster_service.NodePool.Status.PROVISIONING
assert response.status_message == "status_message_value"
assert response.pod_ipv4_cidr_size == 1856
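

# NOTE (illustrative aside, added editorially): proto3 fields always carry
# defaults, so an empty request constructs cleanly and compares equal to any
# other empty instance, which is what the ``args[0] == request_type()``
# assertions in this file rely on.
def _sketch_proto3_defaults():
    assert (
        cluster_service.GetNodePoolRequest()
        == cluster_service.GetNodePoolRequest()
    )
    # Unset scalar fields read back as their zero values, e.g. "" for strings.
    assert cluster_service.GetNodePoolRequest().name == ""
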
def test_get_node_pool_from_dict():
test_get_node_pool(request_type=dict)
def test_get_node_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
client.get_node_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetNodePoolRequest()
@pytest.mark.asyncio
async def test_get_node_pool_async(
transport: str = "grpc_asyncio", request_type=cluster_service.GetNodePoolRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool(
name="name_value",
initial_node_count=1911,
locations=["locations_value"],
self_link="self_link_value",
version="version_value",
instance_group_urls=["instance_group_urls_value"],
status=cluster_service.NodePool.Status.PROVISIONING,
status_message="status_message_value",
pod_ipv4_cidr_size=1856,
)
)
response = await client.get_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.NodePool)
assert response.name == "name_value"
assert response.initial_node_count == 1911
assert response.locations == ["locations_value"]
assert response.self_link == "self_link_value"
assert response.version == "version_value"
assert response.instance_group_urls == ["instance_group_urls_value"]
assert response.status == cluster_service.NodePool.Status.PROVISIONING
assert response.status_message == "status_message_value"
assert response.pod_ipv4_cidr_size == 1856
@pytest.mark.asyncio
async def test_get_node_pool_async_from_dict():
await test_get_node_pool_async(request_type=dict)
def test_get_node_pool_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = cluster_service.NodePool()
client.get_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_node_pool_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.GetNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool()
)
await client.get_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_node_pool_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
def test_get_node_pool_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_node_pool(
cluster_service.GetNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_get_node_pool_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.NodePool()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_node_pool_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_node_pool(
cluster_service.GetNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
def test_create_node_pool(
transport: str = "grpc", request_type=cluster_service.CreateNodePoolRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.create_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_create_node_pool_from_dict():
test_create_node_pool(request_type=dict)
def test_create_node_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
client.create_node_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateNodePoolRequest()
@pytest.mark.asyncio
async def test_create_node_pool_async(
transport: str = "grpc_asyncio", request_type=cluster_service.CreateNodePoolRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.create_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_create_node_pool_async_from_dict():
await test_create_node_pool_async(request_type=dict)
def test_create_node_pool_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CreateNodePoolRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_node_pool_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CreateNodePoolRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.create_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_node_pool_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool=cluster_service.NodePool(name="name_value"),
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool == cluster_service.NodePool(name="name_value")
assert args[0].parent == "parent_value"
def test_create_node_pool_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_node_pool(
cluster_service.CreateNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool=cluster_service.NodePool(name="name_value"),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_create_node_pool_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool=cluster_service.NodePool(name="name_value"),
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool == cluster_service.NodePool(name="name_value")
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_create_node_pool_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_node_pool(
cluster_service.CreateNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool=cluster_service.NodePool(name="name_value"),
parent="parent_value",
)
def test_delete_node_pool(
transport: str = "grpc", request_type=cluster_service.DeleteNodePoolRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.delete_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_delete_node_pool_from_dict():
test_delete_node_pool(request_type=dict)
def test_delete_node_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
client.delete_node_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.DeleteNodePoolRequest()
@pytest.mark.asyncio
async def test_delete_node_pool_async(
transport: str = "grpc_asyncio", request_type=cluster_service.DeleteNodePoolRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.delete_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_delete_node_pool_async_from_dict():
await test_delete_node_pool_async(request_type=dict)
def test_delete_node_pool_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.DeleteNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_node_pool_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.DeleteNodePoolRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.delete_node_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_node_pool_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
def test_delete_node_pool_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_node_pool(
cluster_service.DeleteNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_node_pool_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_node_pool(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_node_pool_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_node_pool(
cluster_service.DeleteNodePoolRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
def test_rollback_node_pool_upgrade(
transport: str = "grpc", request_type=cluster_service.RollbackNodePoolUpgradeRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.rollback_node_pool_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_rollback_node_pool_upgrade_from_dict():
test_rollback_node_pool_upgrade(request_type=dict)
def test_rollback_node_pool_upgrade_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
client.rollback_node_pool_upgrade()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
@pytest.mark.asyncio
async def test_rollback_node_pool_upgrade_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.RollbackNodePoolUpgradeRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.rollback_node_pool_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_rollback_node_pool_upgrade_async_from_dict():
await test_rollback_node_pool_upgrade_async(request_type=dict)
def test_rollback_node_pool_upgrade_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.RollbackNodePoolUpgradeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.rollback_node_pool_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_rollback_node_pool_upgrade_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.RollbackNodePoolUpgradeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.rollback_node_pool_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_rollback_node_pool_upgrade_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.rollback_node_pool_upgrade(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
def test_rollback_node_pool_upgrade_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.rollback_node_pool_upgrade(
cluster_service.RollbackNodePoolUpgradeRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_rollback_node_pool_upgrade_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.rollback_node_pool_upgrade(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].node_pool_id == "node_pool_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_rollback_node_pool_upgrade_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.rollback_node_pool_upgrade(
cluster_service.RollbackNodePoolUpgradeRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
node_pool_id="node_pool_id_value",
name="name_value",
)
def test_set_node_pool_management(
transport: str = "grpc", request_type=cluster_service.SetNodePoolManagementRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_node_pool_management(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_node_pool_management_from_dict():
test_set_node_pool_management(request_type=dict)
def test_set_node_pool_management_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_management), "__call__"
) as call:
client.set_node_pool_management()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolManagementRequest()
@pytest.mark.asyncio
async def test_set_node_pool_management_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetNodePoolManagementRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_node_pool_management(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_node_pool_management_async_from_dict():
await test_set_node_pool_management_async(request_type=dict)
def test_set_node_pool_management_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolManagementRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_node_pool_management(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_node_pool_management_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolManagementRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_node_pool_management(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_labels(
transport: str = "grpc", request_type=cluster_service.SetLabelsRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_labels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_labels_from_dict():
test_set_labels(request_type=dict)
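# ``request_type=dict`` reruns the test above with a plain dict in place of a
# request message; proto-plus coerces the dict into a SetLabelsRequest, so
# both spellings must behave identically.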
def test_set_labels_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
client.set_labels()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLabelsRequest()
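# Calling the method with no arguments at all should still send a well-formed,
# default-constructed request rather than raising, which is what the
# empty-call test above pins down.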
@pytest.mark.asyncio
async def test_set_labels_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetLabelsRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_labels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_labels_async_from_dict():
await test_set_labels_async(request_type=dict)
def test_set_labels_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLabelsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_labels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_labels_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLabelsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_labels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_legacy_abac(
transport: str = "grpc", request_type=cluster_service.SetLegacyAbacRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_legacy_abac(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_legacy_abac_from_dict():
test_set_legacy_abac(request_type=dict)
def test_set_legacy_abac_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
client.set_legacy_abac()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLegacyAbacRequest()
@pytest.mark.asyncio
async def test_set_legacy_abac_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetLegacyAbacRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_legacy_abac(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_legacy_abac_async_from_dict():
await test_set_legacy_abac_async(request_type=dict)
def test_set_legacy_abac_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLegacyAbacRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_legacy_abac(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_legacy_abac_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetLegacyAbacRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_legacy_abac(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_legacy_abac_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_legacy_abac(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
enabled=True,
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
        assert args[0].enabled is True
assert args[0].name == "name_value"
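# For comparison, the flattened call above is equivalent to building the
# request explicitly (a sketch reusing the same placeholder values, not an
# additional test):
#
#   request = cluster_service.SetLegacyAbacRequest(
#       project_id="project_id_value",
#       zone="zone_value",
#       cluster_id="cluster_id_value",
#       enabled=True,
#       name="name_value",
#   )
#   client.set_legacy_abac(request=request)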
def test_set_legacy_abac_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_legacy_abac(
cluster_service.SetLegacyAbacRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
enabled=True,
name="name_value",
)
@pytest.mark.asyncio
async def test_set_legacy_abac_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_legacy_abac(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
enabled=True,
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
        assert args[0].enabled is True
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_legacy_abac_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_legacy_abac(
cluster_service.SetLegacyAbacRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
enabled=True,
name="name_value",
)
def test_start_ip_rotation(
transport: str = "grpc", request_type=cluster_service.StartIPRotationRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.start_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_start_ip_rotation_from_dict():
test_start_ip_rotation(request_type=dict)
def test_start_ip_rotation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
client.start_ip_rotation()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.StartIPRotationRequest()
@pytest.mark.asyncio
async def test_start_ip_rotation_async(
transport: str = "grpc_asyncio", request_type=cluster_service.StartIPRotationRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.start_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_start_ip_rotation_async_from_dict():
await test_start_ip_rotation_async(request_type=dict)
def test_start_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.StartIPRotationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.start_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_start_ip_rotation_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.StartIPRotationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.start_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_start_ip_rotation_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.start_ip_rotation(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
def test_start_ip_rotation_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.start_ip_rotation(
cluster_service.StartIPRotationRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_start_ip_rotation_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.start_ip_rotation(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_start_ip_rotation_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.start_ip_rotation(
cluster_service.StartIPRotationRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
def test_complete_ip_rotation(
transport: str = "grpc", request_type=cluster_service.CompleteIPRotationRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.complete_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_complete_ip_rotation_from_dict():
test_complete_ip_rotation(request_type=dict)
def test_complete_ip_rotation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
client.complete_ip_rotation()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CompleteIPRotationRequest()
@pytest.mark.asyncio
async def test_complete_ip_rotation_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.CompleteIPRotationRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.complete_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_complete_ip_rotation_async_from_dict():
await test_complete_ip_rotation_async(request_type=dict)
def test_complete_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CompleteIPRotationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.complete_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_complete_ip_rotation_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.CompleteIPRotationRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.complete_ip_rotation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_complete_ip_rotation_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.complete_ip_rotation(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
def test_complete_ip_rotation_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.complete_ip_rotation(
cluster_service.CompleteIPRotationRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
@pytest.mark.asyncio
async def test_complete_ip_rotation_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.complete_ip_rotation(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_complete_ip_rotation_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.complete_ip_rotation(
cluster_service.CompleteIPRotationRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
name="name_value",
)
def test_set_node_pool_size(
transport: str = "grpc", request_type=cluster_service.SetNodePoolSizeRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_node_pool_size(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_node_pool_size_from_dict():
test_set_node_pool_size(request_type=dict)
def test_set_node_pool_size_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_size), "__call__"
) as call:
client.set_node_pool_size()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolSizeRequest()
@pytest.mark.asyncio
async def test_set_node_pool_size_async(
transport: str = "grpc_asyncio", request_type=cluster_service.SetNodePoolSizeRequest
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_node_pool_size(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_node_pool_size_async_from_dict():
await test_set_node_pool_size_async(request_type=dict)
def test_set_node_pool_size_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolSizeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_node_pool_size(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_node_pool_size_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNodePoolSizeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_node_pool_size(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_network_policy(
transport: str = "grpc", request_type=cluster_service.SetNetworkPolicyRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_network_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_network_policy_from_dict():
test_set_network_policy(request_type=dict)
def test_set_network_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
client.set_network_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNetworkPolicyRequest()
@pytest.mark.asyncio
async def test_set_network_policy_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetNetworkPolicyRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_network_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_network_policy_async_from_dict():
await test_set_network_policy_async(request_type=dict)
def test_set_network_policy_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNetworkPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_network_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_network_policy_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetNetworkPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_network_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_network_policy_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_network_policy(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
network_policy=cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].network_policy == cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
)
assert args[0].name == "name_value"
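# Note that the ``network_policy`` comparison above relies on proto-plus
# message equality, which compares the underlying protobuf field values rather
# than object identity.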
def test_set_network_policy_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_network_policy(
cluster_service.SetNetworkPolicyRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
network_policy=cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
),
name="name_value",
)
@pytest.mark.asyncio
async def test_set_network_policy_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_network_policy(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
network_policy=cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].network_policy == cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
)
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_network_policy_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_network_policy(
cluster_service.SetNetworkPolicyRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
network_policy=cluster_service.NetworkPolicy(
provider=cluster_service.NetworkPolicy.Provider.CALICO
),
name="name_value",
)
def test_set_maintenance_policy(
transport: str = "grpc", request_type=cluster_service.SetMaintenancePolicyRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
response = client.set_maintenance_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
def test_set_maintenance_policy_from_dict():
test_set_maintenance_policy(request_type=dict)
def test_set_maintenance_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
client.set_maintenance_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMaintenancePolicyRequest()
@pytest.mark.asyncio
async def test_set_maintenance_policy_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.SetMaintenancePolicyRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
name="name_value",
zone="zone_value",
operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
status=cluster_service.Operation.Status.PENDING,
detail="detail_value",
status_message="status_message_value",
self_link="self_link_value",
target_link="target_link_value",
location="location_value",
start_time="start_time_value",
end_time="end_time_value",
)
)
response = await client.set_maintenance_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
assert response.zone == "zone_value"
assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
assert response.status == cluster_service.Operation.Status.PENDING
assert response.detail == "detail_value"
assert response.status_message == "status_message_value"
assert response.self_link == "self_link_value"
assert response.target_link == "target_link_value"
assert response.location == "location_value"
assert response.start_time == "start_time_value"
assert response.end_time == "end_time_value"
@pytest.mark.asyncio
async def test_set_maintenance_policy_async_from_dict():
await test_set_maintenance_policy_async(request_type=dict)
def test_set_maintenance_policy_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMaintenancePolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
client.set_maintenance_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_maintenance_policy_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.SetMaintenancePolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
await client.set_maintenance_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_maintenance_policy_flattened():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_maintenance_policy(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
maintenance_policy=cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
)
assert args[0].name == "name_value"
def test_set_maintenance_policy_flattened_error():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_maintenance_policy(
cluster_service.SetMaintenancePolicyRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
maintenance_policy=cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
),
name="name_value",
)
@pytest.mark.asyncio
async def test_set_maintenance_policy_flattened_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cluster_service.Operation()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_maintenance_policy(
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
maintenance_policy=cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
),
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"
assert args[0].cluster_id == "cluster_id_value"
assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
)
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_set_maintenance_policy_flattened_error_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_maintenance_policy(
cluster_service.SetMaintenancePolicyRequest(),
project_id="project_id_value",
zone="zone_value",
cluster_id="cluster_id_value",
maintenance_policy=cluster_service.MaintenancePolicy(
window=cluster_service.MaintenanceWindow(
daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
start_time="start_time_value"
)
)
),
name="name_value",
)
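# Hedged sketch of the flattened-call contract exercised above: the client
# copies each keyword argument onto a fresh request message (which is why the
# flattened tests read fields straight off args[0]), and passing a request
# object alongside flattened fields is ambiguous, hence the ValueError.
# Roughly:
#   request = cluster_service.SetMaintenancePolicyRequest()
#   request.project_id = "project_id_value"  # ...and so on for each kwarg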
def test_list_usable_subnetworks(
transport: str = "grpc", request_type=cluster_service.ListUsableSubnetworksRequest
):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListUsableSubnetworksResponse(
next_page_token="next_page_token_value",
)
response = client.list_usable_subnetworks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListUsableSubnetworksPager)
assert response.next_page_token == "next_page_token_value"
def test_list_usable_subnetworks_from_dict():
test_list_usable_subnetworks(request_type=dict)
def test_list_usable_subnetworks_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
client.list_usable_subnetworks()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListUsableSubnetworksRequest()
@pytest.mark.asyncio
async def test_list_usable_subnetworks_async(
transport: str = "grpc_asyncio",
request_type=cluster_service.ListUsableSubnetworksRequest,
):
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListUsableSubnetworksResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_usable_subnetworks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_usable_subnetworks_async_from_dict():
await test_list_usable_subnetworks_async(request_type=dict)
def test_list_usable_subnetworks_field_headers():
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListUsableSubnetworksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = cluster_service.ListUsableSubnetworksResponse()
client.list_usable_subnetworks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_usable_subnetworks_field_headers_async():
client = ClusterManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cluster_service.ListUsableSubnetworksRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListUsableSubnetworksResponse()
)
await client.list_usable_subnetworks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_usable_subnetworks_pager():
    client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
next_page_token="abc",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[], next_page_token="def",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[cluster_service.UsableSubnetwork(),],
next_page_token="ghi",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_usable_subnetworks(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in results)
def test_list_usable_subnetworks_pages():
    client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
next_page_token="abc",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[], next_page_token="def",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[cluster_service.UsableSubnetwork(),],
next_page_token="ghi",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
),
RuntimeError,
)
pages = list(client.list_usable_subnetworks(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_usable_subnetworks_async_pager():
    client = ClusterManagerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
next_page_token="abc",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[], next_page_token="def",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[cluster_service.UsableSubnetwork(),],
next_page_token="ghi",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
),
RuntimeError,
)
async_pager = await client.list_usable_subnetworks(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cluster_service.UsableSubnetwork) for i in responses)
@pytest.mark.asyncio
async def test_list_usable_subnetworks_async_pages():
    client = ClusterManagerAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
next_page_token="abc",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[], next_page_token="def",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[cluster_service.UsableSubnetwork(),],
next_page_token="ghi",
),
cluster_service.ListUsableSubnetworksResponse(
subnetworks=[
cluster_service.UsableSubnetwork(),
cluster_service.UsableSubnetwork(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_usable_subnetworks(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
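# Hedged illustration of the pager protocol exercised above: iterating a pager
# yields individual UsableSubnetwork items across pages, .pages yields the raw
# ListUsableSubnetworksResponse objects, and iteration stops on the page whose
# next_page_token is empty, e.g.
#   pager = client.list_usable_subnetworks(request={})
#   subnetworks = list(pager)  # 3 + 0 + 1 + 2 = 6 items in the pages above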
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ClusterManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ClusterManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ClusterManagerClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ClusterManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ClusterManagerClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ClusterManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ClusterManagerClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ClusterManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ClusterManagerGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ClusterManagerGrpcTransport,)
def test_cluster_manager_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ClusterManagerTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_cluster_manager_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ClusterManagerTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_clusters",
"get_cluster",
"create_cluster",
"update_cluster",
"update_node_pool",
"set_node_pool_autoscaling",
"set_logging_service",
"set_monitoring_service",
"set_addons_config",
"set_locations",
"update_master",
"set_master_auth",
"delete_cluster",
"list_operations",
"get_operation",
"cancel_operation",
"get_server_config",
"get_json_web_keys",
"list_node_pools",
"get_node_pool",
"create_node_pool",
"delete_node_pool",
"rollback_node_pool_upgrade",
"set_node_pool_management",
"set_labels",
"set_legacy_abac",
"start_ip_rotation",
"complete_ip_rotation",
"set_node_pool_size",
"set_network_policy",
"set_maintenance_policy",
"list_usable_subnetworks",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_cluster_manager_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ClusterManagerTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_cluster_manager_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ClusterManagerTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_cluster_manager_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ClusterManagerTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_cluster_manager_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ClusterManagerClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_cluster_manager_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ClusterManagerClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_cluster_manager_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_cluster_manager_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ClusterManagerGrpcTransport, grpc_helpers),
(transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_gte_1_26_0
def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"container.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="container.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ClusterManagerGrpcTransport, grpc_helpers),
(transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_cluster_manager_transport_create_channel_old_api_core(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"container.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ClusterManagerGrpcTransport, grpc_helpers),
(transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_cluster_manager_transport_create_channel_user_scopes(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"container.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
def test_cluster_manager_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
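# Hedged sketch: client_cert_source_callback (defined earlier in this module)
# is a zero-argument callable returning a (certificate_chain, private_key)
# pair of bytes, matching the grpc.ssl_channel_credentials call asserted
# above, e.g.
#   def client_cert_source_callback():
#       return b"cert bytes", b"key bytes"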
def test_cluster_manager_host_no_port():
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="container.googleapis.com"
),
)
assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_host_with_port():
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="container.googleapis.com:8000"
),
)
assert client.transport._host == "container.googleapis.com:8000"
def test_cluster_manager_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ClusterManagerGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_cluster_manager_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ClusterManagerGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
def test_cluster_manager_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.ClusterManagerGrpcTransport,
transports.ClusterManagerGrpcAsyncIOTransport,
],
)
def test_cluster_manager_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ClusterManagerClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ClusterManagerClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ClusterManagerClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ClusterManagerClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ClusterManagerClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ClusterManagerClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ClusterManagerClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ClusterManagerClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ClusterManagerClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ClusterManagerClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ClusterManagerClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ClusterManagerClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ClusterManagerClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ClusterManagerClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ClusterManagerClient.parse_common_location_path(path)
assert expected == actual
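# Hedged illustration of the path helpers exercised above: each builder fills
# a fixed resource template and each parser inverts it, e.g.
#   ClusterManagerClient.common_location_path("winkle", "nautilus")
#   # -> "projects/winkle/locations/nautilus"
#   ClusterManagerClient.parse_common_location_path("projects/scallop/locations/abalone")
#   # -> {"project": "scallop", "location": "abalone"}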
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ClusterManagerTransport, "_prep_wrapped_messages"
) as prep:
client = ClusterManagerClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ClusterManagerTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ClusterManagerClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| 39.541999 | 118 | 0.688467 | (remaining quality-signal columns elided) |
82ca28b484198c259abeab4a07bbd20357d0459e | 21,489 | py | Python | robin_stocks/options.py | shobnaren/robin_stocks | 7f15bd3b73da3c01fe164316b259cc03ffab9ea0 | ["MIT"] | 2 | 2020-03-29T07:12:33.000Z | 2021-01-16T17:27:10.000Z | (issues/forks columns: same path/repo/hexsha/license, counts and dates null) |
"""Contains functions for getting information about options."""
import robin_stocks.helper as helper
import robin_stocks.urls as urls
@helper.login_required
def get_aggregate_positions(info=None):
"""Collapses all option orders for a stock into a single dictionary.
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each order. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.aggregate()
data = helper.request_get(url, 'pagination')
return helper.data_filter(data, info)
@helper.login_required
def get_market_options(info=None):
"""Returns a list of all options.
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_orders()
data = helper.request_get(url, 'pagination')
return helper.data_filter(data, info)
@helper.login_required
def get_all_option_positions(info=None):
"""Returns all option positions ever held for the account.
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_positions()
data = helper.request_get(url, 'pagination')
return helper.data_filter(data, info)
@helper.login_required
def get_open_option_positions(info=None):
"""Returns all open option positions for the account.
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_positions()
payload = {'nonzero': 'True'}
data = helper.request_get(url, 'pagination', payload)
return helper.data_filter(data, info)
def get_chains(symbol, info=None):
"""Returns the chain information of an option.
:param symbol: The ticker of the stock.
:type symbol: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
except AttributeError as message:
print(message)
return None
url = urls.chains(symbol)
data = helper.request_get(url)
return helper.data_filter(data, info)
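# Hedged usage sketch (assumes a prior robin_stocks login; "AAPL" is an
# illustrative ticker, and "expiration_dates" assumes the chain payload
# carries that key):
#   chain = get_chains("AAPL")
#   expirations = get_chains("AAPL", info="expiration_dates")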
def find_tradable_options_for_stock(symbol, option_type='both', info=None):
"""Returns a list of all available options for a stock.
:param symbol: The ticker of the stock.
:type symbol: str
:param option_type: Can be either 'call' or 'put' or left blank to get both.
:type option_type: Optional[str]
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all calls of the stock. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
url = urls.option_instruments()
if option_type == 'call' or option_type == 'put':
payload = {'chain_id': helper.id_for_chain(symbol),
'state': 'active',
'tradability': 'tradable',
'type': option_type}
else:
payload = {'chain_id': helper.id_for_chain(symbol),
'state': 'active',
'tradability': 'tradable'}
data = helper.request_get(url, 'pagination', payload)
return helper.data_filter(data, info)
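# Hedged usage sketch ("AAPL" is an illustrative ticker):
#   calls = find_tradable_options_for_stock("AAPL", option_type="call")
#   strikes = find_tradable_options_for_stock("AAPL", info="strike_price")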
def id_of_options_to_close(symbol, expiration_date, strike, option_type, count=0, _type='long'):
"""
:param symbol:
:param expiration_date:
:param strike:
:param option_type:
:param count:
:param _type:
:return: only when option exists in open position else return None
"""
data = get_open_option_positions()
msg = "ZERO holdings in open position to close"
for item in filter(lambda x: symbol == x['chain_symbol'] and _type == x['type'], data):
per_data = helper.request_get(item['option'])
print(per_data)
if per_data['expiration_date'] == expiration_date and float(per_data["strike_price"]) == float(strike) and \
per_data['type'] == option_type:
if int(count) <= int(float(item['quantity'])):
return per_data['id']
else:
msg = "NOT enough quantity to close. holding {} < {}".format(count, int(float(item['quantity'])))
print(msg)
return None
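# Hedged usage sketch (symbol, date, and strike are illustrative): close one
# long call only if enough contracts are actually held open:
#   option_id = id_of_options_to_close("AAPL", "2021-06-18", 130.0, "call", count=1)
#   if option_id is None:
#       pass  # nothing (or not enough) to close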
def find_options_for_stock_by_expiration(symbol, expiration_date, option_type='both', info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param option_type: Can be either 'call' or 'put' or leave blank to get both.
:type option_type: Optional[str]
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
all_options = find_tradable_options_for_stock(symbol, option_type)
filtered_options = [item for item in all_options if item["expiration_date"] == expiration_date
and item['tradability'] == 'tradable']
for item in filtered_options:
market_data = get_option_market_data_by_id(item['id'])
item.update(market_data)
return helper.data_filter(filtered_options, info)
def find_options_for_stock_by_strike(symbol, strike, option_type='both', info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param strike: Represents the price of the option.
:type strike: str
:param option_type: Can be either 'call' or 'put' or leave blank to get both.
:type option_type: Optional[str]
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
all_options = find_tradable_options_for_stock(symbol, option_type)
filtered_options = [item for item in all_options if float(item["strike_price"]) == float(strike)
and item['tradability'] == 'tradable']
for item in filtered_options:
market_data = get_option_market_data_by_id(item['id'])
item.update(market_data)
return helper.data_filter(filtered_options, info)
def find_options_for_stock_by_expiration_and_strike(symbol, expiration_date, strike, option_type='both', info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param strike: Represents the price of the option.
:type strike: str
:param option_type: Can be either 'call' or 'put' or leave blank to get both.
:type option_type: Optional[str]
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
all_options = find_tradable_options_for_stock(symbol, option_type)
filtered_options = [item for item in all_options if
item["expiration_date"] == expiration_date and float(item["strike_price"]) == float(strike)
and item['tradability'] == 'tradable']
for item in filtered_options:
market_data = get_option_market_data_by_id(item['id'])
item.update(market_data)
return helper.data_filter(filtered_options, info)
def find_options_for_list_of_stocks_by_expiration_date(input_symbols, expiration_date, option_type='both', info=None):
"""Returns a list of all the option orders that match the seach parameters
:param input_symbols: May be a single stock ticker or a list of stock tickers.
:type input_symbols: str or list
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param option_type: Can be either 'call' or 'put' or leave blank to get both.
:type option_type: Optional[str]
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(input_symbols)
try:
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
data = []
url = urls.option_instruments()
for symbol in symbols:
if option_type == 'put' or option_type == 'call':
payload = {'chain_id': helper.id_for_chain(symbol),
'expiration_date': expiration_date,
'state': 'active',
'tradability': 'tradable',
'rhs_tradability': 'tradable',
'type': option_type}
else:
payload = {'chain_id': helper.id_for_chain(symbol),
'expiration_date': expiration_date,
'state': 'active',
'tradability': 'tradable',
'rhs_tradability': 'tradable'}
other_data = helper.request_get(url, 'pagination', payload)
for item in other_data:
if item['expiration_date'] == expiration_date and item['tradability'] == 'tradable':
data.append(item)
for item in data:
market_data = get_option_market_data_by_id(item['id'])
item.update(market_data)
return helper.data_filter(data, info)
def get_list_market_data(input_symbols, expiration_date, info=None):
"""Returns a list of option market data for several stock tickers.
:param input_symbols: May be a single stock ticker or a list of stock tickers.
:type input_symbols: str or list
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(input_symbols)
ids = []
data = []
url = urls.option_instruments()
for symbol in symbols:
payload = {'chain_id': helper.id_for_chain(symbol),
'expiration_date': expiration_date,
'state': 'active',
'tradability': 'tradable',
'rhs_tradability': 'tradable'}
other_data = helper.request_get(url, 'pagination', payload)
for item in other_data:
if item['expiration_date'] == expiration_date and item['tradability'] == 'tradable':
ids.append(item['id'])
for _id in ids:
url = urls.marketdata_options(_id)
other_data = helper.request_get(url)
data.append(other_data)
return helper.data_filter(data, info)
def get_list_options_of_specific_profitability(input_symbols, expiration_date, type_profit="chance_of_profit_short",
profit_floor=0.0, profit_ceiling=1.0, info=None):
"""Returns a list of option market data for several stock tickers that match a range of profitability.
:param input_symbols: May be a single stock ticker or a list of stock tickers.
:type input_symbols: str or list
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param type_profit: Will either be "chance_of_profit_short" or "chance_of_profit_long".
:type type_profit: str
    :param profit_floor: The lower bound of the profitability range, on a scale of 0 to 1.
    :type profit_floor: float
    :param profit_ceiling: The upper bound of the profitability range, on a scale of 0 to 1.
    :type profit_ceiling: float
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(input_symbols)
ids = []
data = []
return_data = []
url = urls.option_instruments()
if type_profit != "chance_of_profit_short" and type_profit != "chance_of_profit_long":
print("Invalid string for 'type_profit'. Defaulting to 'chance_of_profit_short'.")
type_profit = "chance_of_profit_short"
for symbol in symbols:
payload = {'chain_id': helper.id_for_chain(symbol),
'expiration_date': expiration_date,
'state': 'active',
'tradability': 'tradable',
'rhs_tradability': 'tradable'}
other_data = helper.request_get(url, 'pagination', payload)
for item in other_data:
if item['tradability'] == 'tradable':
ids.append(item['id'])
for _id in ids:
url = urls.marketdata_options(_id)
other_data = helper.request_get(url)
data.append(other_data)
for item in data:
try:
float_value = float(item[type_profit])
if profit_floor < float_value < profit_ceiling:
return_data.append(item)
        except (KeyError, TypeError, ValueError) as e:
            # Skip entries whose profitability field is missing or non-numeric.
            print("Error: {}".format(e))
return helper.data_filter(return_data, info)
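# Hedged usage sketch (tickers and date illustrative): short-side candidates
# with a modeled 60-90% chance of profit:
#   candidates = get_list_options_of_specific_profitability(
#       ["AAPL", "MSFT"], "2021-06-18",
#       type_profit="chance_of_profit_short",
#       profit_floor=0.6, profit_ceiling=0.9,
#   )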
def get_option_market_data_by_id(_id, info=None):
"""Returns the option market data for a stock, including the greeks,
open interest, change of profit, and adjusted mark price.
:param _id: The id of the stock.
:type _id: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
url = urls.marketdata_options(_id)
data = helper.request_get(url)
return helper.data_filter(data, info)
def get_option_market_data(symbol, expiration_date, strike, option_type, info=None):
"""Returns the option market data for the stock option, including the greeks,
open interest, change of profit, and adjusted mark price.
:param symbol: The ticker of the stock.
:type symbol: str
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param strike: Represents the price of the option.
:type strike: str
:param option_type: Can be either 'call' or 'put'.
:type option_type: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
option_id = helper.id_for_option(symbol, expiration_date, strike, option_type)
url = urls.marketdata_options(option_id)
data = helper.request_get(url)
return helper.data_filter(data, info)
def get_option_instrument_data_by_id(_id, info=None):
"""Returns the option instrument information.
:param _id: The id of the stock.
:type _id: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
url = urls.option_instruments(_id)
data = helper.request_get(url)
return helper.data_filter(data, info)
def get_option_instrument_data(symbol, expiration_date, strike, option_type, info=None):
"""Returns the option instrument data for the stock option.
:param symbol: The ticker of the stock.
:type symbol: str
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param strike: Represents the price of the option.
:type strike: str
:param option_type: Can be either 'call' or 'put'.
:type option_type: str
:param info: Will data_filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
option_id = helper.id_for_option(symbol, expiration_date, strike, option_type)
url = urls.option_instruments(option_id)
data = helper.request_get(url)
return helper.data_filter(data, info)
def get_option_historicals(symbol, expiration_date, strike, option_type, span='week'):
"""Returns the data that is used to make the graphs.
:param symbol: The ticker of the stock.
:type symbol: str
:param expiration_date: Represents the expiration date in the format YYYY-MM-DD.
:type expiration_date: str
:param strike: Represents the price of the option.
:type strike: str
:param option_type: Can be either 'call' or 'put'.
:type option_type: str
:param span: Sets the range of the data to be either 'day', 'week', 'year', or '5year'. Default is 'week'.
:type span: Optional[str]
:returns: Returns a list that contains a list for each symbol. \
Each list contains a dictionary where each dictionary is for a different time.
"""
try:
symbol = symbol.upper().strip()
option_type = option_type.lower().strip()
except AttributeError as message:
print(message)
return [None]
span_check = ['day', 'week', 'year', '5year']
if span not in span_check:
        print('ERROR: Span must be "day", "week", "year", or "5year".')
return [None]
if span == 'day':
interval = '5minute'
elif span == 'week':
interval = '10minute'
elif span == 'year':
interval = 'day'
else:
interval = 'week'
option_id = helper.id_for_option(symbol, expiration_date, strike, option_type)
url = urls.option_historicals(option_id)
payload = {'span': span,
'interval': interval}
data = helper.request_get(url, 'regular', payload)
return data
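# Hedged usage sketch (values illustrative): 5-minute candles for one session:
#   candles = get_option_historicals("AAPL", "2021-06-18", "130", "call", span="day")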
| 39.357143 | 128 | 0.675834 | (remaining quality-signal columns elided) |
82cf5f271090388d02bcb8358588faffc7a690ac | 6,915 | py | Python | tests/services/storage/test_auth.py | PixelogicDev/py42 | ccb100b03025fff1a060a39635bee3e76a251a85 | ["MIT"] | null | null | null | (issues/forks columns: same path/repo/hexsha/license, counts and dates null) |
import pytest
from requests import Request
from py42.services.storage._auth import FileArchiveTmpAuth
from py42.services.storage._auth import SecurityArchiveTmpAuth
from py42.services.storage._auth import V1Auth
TEST_USER_ID = "0123456789"
TEST_DEVICE_GUID = "testdeviceguid"
TEST_DESTINATION_GUID = "testdestinationguid"
TEST_PLAN_UID = "testplanuid"
@pytest.fixture
def mock_request(mocker):
mock = mocker.MagicMock(spec=Request)
mock.headers = {}
return mock
@pytest.fixture
def mock_tmp_auth_conn(mock_connection, py42_response):
py42_response.text = (
'{"serverUrl": "testhost.com", "loginToken": "TEST_TMP_TOKEN_VALUE"}'
)
mock_connection.post.return_value = py42_response
return mock_connection
@pytest.fixture
def mock_v1_auth_conn(mock_connection, py42_response):
py42_response.text = '["TEST_V1", "TOKEN_VALUE"]'
mock_connection.post.return_value = py42_response
return mock_connection
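# Note inferred from the assertions below: the tmp auths send
# "login_token <loginToken>" built from the serverUrl/loginToken payload above,
# while V1Auth joins the two elements of the JSON array with "-" and sends
# "token TEST_V1-TOKEN_VALUE".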
class TestFileArchiveTmpAuth(object):
    def test_call_returns_request_with_expected_header(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        request = auth(mock_request)
        assert request.headers["Authorization"] == "login_token TEST_TMP_TOKEN_VALUE"

    def test_call_only_calls_auth_api_first_time(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1

    def test_call_calls_auth_api_with_expected_url(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        data = {
            u"userId": TEST_USER_ID,
            u"sourceGuid": TEST_DEVICE_GUID,
            u"destinationGuid": TEST_DESTINATION_GUID,
        }
        mock_tmp_auth_conn.post.assert_called_once_with("/api/LoginToken", json=data)

    def test_clear_credentials_causes_auth_api_to_be_called_on_subsequent_calls(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1
        auth.clear_credentials()
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 2

    def test_get_storage_url_returns_expected_value(self, mock_tmp_auth_conn):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        assert auth.get_storage_url() == "testhost.com"

    def test_get_storage_url_only_calls_auth_api_first_time(self, mock_tmp_auth_conn):
        auth = FileArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_USER_ID, TEST_DEVICE_GUID, TEST_DESTINATION_GUID
        )
        auth.get_storage_url()
        assert mock_tmp_auth_conn.post.call_count == 1
        auth.get_storage_url()
        assert mock_tmp_auth_conn.post.call_count == 1


class TestSecurityArchiveTmpAuth(object):
    def test_call_returns_request_with_expected_header(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        request = auth(mock_request)
        assert request.headers["Authorization"] == "login_token TEST_TMP_TOKEN_VALUE"

    def test_call_only_calls_auth_api_first_time(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1

    def test_call_calls_auth_api_with_expected_url(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        data = {u"planUid": TEST_PLAN_UID, u"destinationGuid": TEST_DESTINATION_GUID}
        mock_tmp_auth_conn.post.assert_called_once_with(
            "/api/StorageAuthToken", json=data
        )

    def test_clear_credentials_causes_auth_api_to_be_called_on_subsequent_calls(
        self, mock_tmp_auth_conn, mock_request
    ):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 1
        auth.clear_credentials()
        auth(mock_request)
        assert mock_tmp_auth_conn.post.call_count == 2

    def test_get_storage_url_returns_expected_value(self, mock_tmp_auth_conn):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        assert auth.get_storage_url() == "testhost.com"

    def test_get_storage_url_only_calls_auth_api_first_time(self, mock_tmp_auth_conn):
        auth = SecurityArchiveTmpAuth(
            mock_tmp_auth_conn, TEST_PLAN_UID, TEST_DESTINATION_GUID
        )
        auth.get_storage_url()
        assert mock_tmp_auth_conn.post.call_count == 1
        auth.get_storage_url()
        assert mock_tmp_auth_conn.post.call_count == 1
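

# V1Auth receives its token as a two-element JSON list (see
# mock_v1_auth_conn) and is expected to join the parts with a hyphen.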
class TestV1Auth(object):
    def test_call_returns_request_with_expected_header(
        self, mock_v1_auth_conn, mock_request
    ):
        auth = V1Auth(mock_v1_auth_conn)
        request = auth(mock_request)
        assert request.headers["Authorization"] == "token TEST_V1-TOKEN_VALUE"

    def test_call_only_calls_auth_api_first_time(self, mock_v1_auth_conn, mock_request):
        auth = V1Auth(mock_v1_auth_conn)
        auth(mock_request)
        assert mock_v1_auth_conn.post.call_count == 1
        auth(mock_request)
        assert mock_v1_auth_conn.post.call_count == 1

    def test_call_calls_auth_api_with_expected_url(
        self, mock_v1_auth_conn, mock_request
    ):
        auth = V1Auth(mock_v1_auth_conn)
        auth(mock_request)
        mock_v1_auth_conn.post.assert_called_once_with("/api/AuthToken")

    def test_clear_credentials_causes_auth_api_to_be_called_on_subsequent_calls(
        self, mock_v1_auth_conn, mock_request
    ):
        auth = V1Auth(mock_v1_auth_conn)
        auth(mock_request)
        assert mock_v1_auth_conn.post.call_count == 1
        auth.clear_credentials()
        auth(mock_request)
        assert mock_v1_auth_conn.post.call_count == 2
| 36.015625
| 88
| 0.713521
| 905
| 6,915
| 4.940331
| 0.096133
| 0.094833
| 0.095952
| 0.130843
| 0.884142
| 0.879893
| 0.857526
| 0.857526
| 0.83516
| 0.812794
| 0
| 0.012218
| 0.2188
| 6,915
| 191
| 89
| 36.204188
| 0.815439
| 0
| 0
| 0.707317
| 0
| 0
| 0.058134
| 0.006363
| 0
| 0
| 0
| 0
| 0.146341
| 1
| 0.115854
| false
| 0
| 0.030488
| 0
| 0.182927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 82d17231af298c8439bedaf338e2f2894c6b308a
| 51,450
| py
| Python
| graph/ap (0, 0, 0), 12 sect, mob 4 sect/dynamic/degree variation, 3 slots, 25 stations/with 0/max_dist.py
| KESHAmambo/IEEE_802_11ad_beamforming_simulation
| 93328a41d9c044ee7596d02e360fb3b5f2250ec0
| ["MIT"] | null | null | null |
graph/ap (0, 0, 0), 12 sect, mob 4 sect/dynamic/degree variation, 3 slots, 25 stations/with 0/max_dist.py
| KESHAmambo/IEEE_802_11ad_beamforming_simulation
| 93328a41d9c044ee7596d02e360fb3b5f2250ec0
| ["MIT"] | null | null | null |
graph/ap (0, 0, 0), 12 sect, mob 4 sect/dynamic/degree variation, 3 slots, 25 stations/with 0/max_dist.py
| KESHAmambo/IEEE_802_11ad_beamforming_simulation
| 93328a41d9c044ee7596d02e360fb3b5f2250ec0
| ["MIT"] | null | null | null |
"""
Distribution plot options
=========================
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
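
# Judging by the legend labels passed to distplot below, each avgArrN
# appears to hold maximum-delay samples (ms) from simulation runs at a
# different parameter setting (0, 0.1, 0.3, 0.5, 0.7, 0.9).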
avgArr0 = [1331.200905890909, 1536.0009058909093, 1740.8009058909095, 1740.8009058909095, 1843.2002356363641, 1843.200570763637, 1843.200570763637, 1945.6002356363642, 1945.6009058909096, 1945.6009058909096, 1945.6009058909096, 1945.6009058909096, 2048.0009058909104, 2048.0009058909104, 2048.0009058909104, 2150.400235636365, 2150.4005707636375, 2150.4005707636375, 2150.4009058909105, 2150.4009058909105, 2150.4009058909105, 2252.800235636365, 2252.8005707636376, 2252.8009058909106, 2252.8009058909106, 2252.8009058909106, 2355.2005707636376, 2355.2005707636376, 2355.2009058909107, 2355.2009058909107, 2355.2009058909107, 2355.2009058909107, 2355.2009058909107, 2355.2009058909107, 2457.6005707636377, 2457.6005707636377, 2457.600905890911, 2457.600905890911, 2457.600905890911, 2560.000570763638, 2560.000570763638, 2560.000905890911, 2560.000905890911, 2560.000905890911, 2560.000905890911, 2560.000905890911, 2560.000905890911, 2662.400570763638, 2764.8002356363654, 2764.800570763638, 2764.800905890911, 2867.2002356363655, 2867.200905890911, 2867.200905890911, 2867.200905890911, 2867.200905890911, 2867.200905890911, 2969.6002356363656, 2969.6009058909112, 2969.6009058909112, 3072.0005707636383, 3072.0009058909113, 3174.400235636366, 3174.4005707636384, 3174.4005707636384, 3174.4009058909114, 3174.4009058909114, 3174.4009058909114, 3174.4009058909114, 3276.8005707636385, 3276.8009058909115, 3276.8009058909115, 3276.8009058909115, 3276.8009058909115, 3276.8009058909115, 3379.2005707636386, 3379.2009058909116, 3379.2009058909116, 3481.6009058909117, 3584.0005707636387, 3584.0005707636387, 3584.000905890912, 3686.400570763639, 3686.400905890912, 3788.800570763639, 3788.800905890912, 3891.2002356363664, 3993.600905890912, 4096.00090589091, 4096.00090589091, 4198.400570763637, 4198.40090589091, 4505.600235636363, 4505.6009058909085, 4608.000905890908, 5120.000570763634, 5427.200570763633, 5632.0009058909045, 7065.600905890899, 9216.000570763621]
avgArr1 = [409.60392203636366, 409.60492741818183, 409.60492741818183, 409.6052625454546, 409.6052625454546, 409.6052625454546, 409.6055976727273, 409.6055976727273, 409.6055976727273, 409.6059328, 409.60626792727277, 409.60626792727277, 409.60693818181824, 409.60727330909094, 409.60727330909094, 409.6079435636364, 409.6079435636364, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0022463999999, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 
512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0029166545454, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0032517818181, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0039220363635, 512.0039220363635, 512.0039220363635, 512.0039220363635, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0045922909089, 512.0045922909089, 512.0045922909089, 512.0052625454545, 512.0052625454545, 512.0052625454545, 512.0052625454545, 512.0062679272726, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4012410181817, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 
614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4015761454544, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4019112727271, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4022463999999, 614.4025815272726, 614.4025815272726, 614.4025815272726, 614.4025815272726, 614.4025815272726, 614.4025815272726, 614.4025815272726, 614.4029166545454, 614.4029166545454, 716.8002356363635, 716.8002356363635, 716.8002356363635, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8012410181817, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8015761454544, 716.8019112727271, 716.8019112727271, 716.8019112727271, 716.8019112727271, 716.8019112727271, 819.2005707636362, 819.2005707636362, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2012410181817, 921.6005707636361, 1024.000905890909, 1024.000905890909]
avgArr3 = [204.81866763636367, 204.81933789090914, 204.81933789090914, 204.82335941818187, 204.82436480000004, 204.82469992727277, 204.82537018181822, 204.82570530909095, 204.82604043636368, 204.82604043636368, 204.82604043636368, 204.82604043636368, 204.82637556363642, 204.82637556363642, 204.82671069090912, 204.82671069090912, 204.82704581818186, 204.83106734545458, 204.83341323636367, 204.83441861818187, 204.83542400000005, 204.8360942545455, 204.83676450909095, 204.83676450909095, 204.8370996363637, 204.8374347636364, 204.8374347636364, 204.8374347636364, 204.83776989090913, 204.83776989090913, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 
307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6019112727273, 409.6025815272728, 409.6025815272728, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 
512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0015761454545, 512.0019112727272, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0025815272726, 512.0029166545454, 512.0049274181818, 614.4002356363635, 614.4002356363635, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4012410181817, 614.4019112727271, 614.4022463999999, 614.4025815272726, 614.4035869090908, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 819.2009058909089, 819.2009058909089, 819.2009058909089, 819.2012410181817, 921.6005707636361, 921.6005707636361, 921.6009058909088]
avgArr5 = [204.81431098181824, 204.81431098181824, 204.81464610909094, 204.8153163636364, 204.8153163636364, 204.81565149090915, 204.81565149090915, 204.82201890909096, 204.82201890909096, 204.82335941818187, 204.8240296727273, 204.8240296727273, 204.82436480000004, 204.82469992727277, 204.8250350545455, 204.82537018181822, 204.82570530909095, 204.82570530909095, 204.82570530909095, 204.82604043636368, 204.82604043636368, 204.82604043636368, 204.82604043636368, 204.82637556363642, 204.82637556363642, 204.82637556363642, 204.82637556363642, 204.82671069090912, 204.82671069090912, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.82704581818186, 204.8337483636364, 204.8337483636364, 204.83642938181822, 204.83676450909095, 204.8370996363637, 204.83776989090913, 204.83776989090913, 204.8387752727273, 204.83911040000004, 204.83911040000004, 204.83944552727277, 204.8397806545455, 204.8397806545455, 204.8401157818182, 204.84045090909095, 204.84045090909095, 204.84145629090912, 204.8421265454546, 204.84279680000003, 204.84279680000003, 204.84313192727276, 204.84313192727276, 204.84313192727276, 204.84313192727276, 204.84380218181823, 204.84380218181823, 204.84380218181823, 204.84380218181823, 204.84380218181823, 204.84413730909094, 204.84413730909094, 204.84447243636367, 204.84447243636367, 204.84447243636367, 204.84447243636367, 204.84447243636367, 204.8448075636364, 204.84514269090914, 204.84514269090914, 204.84514269090914, 204.84514269090914, 204.84547781818185, 204.84547781818185, 204.84581294545458, 204.84581294545458, 204.84581294545458, 204.84581294545458, 204.84581294545458, 204.84581294545458, 204.84614807272732, 204.84614807272732, 204.84614807272732, 204.84614807272732, 204.84614807272732, 204.84648320000005, 204.84648320000005, 204.84648320000005, 204.84681832727279, 204.84681832727279, 204.84681832727279, 204.84681832727279, 204.84681832727279, 204.84681832727279, 204.85553163636368, 204.86357469090913, 204.86390981818187, 204.86390981818187, 204.8645800727273, 204.86491520000004, 204.86491520000004, 204.86592058181822, 204.86692596363642, 204.86759621818186, 204.86759621818186, 204.86759621818186, 204.86860160000003, 204.86960698181824, 204.87027723636368, 204.87027723636368, 204.8706123636364, 204.87228800000005, 204.8729582545455, 204.87329338181823, 204.8742987636364, 204.87463389090914, 204.87496901818187, 204.87496901818187, 204.8756392727273, 204.87597440000005, 204.87597440000005, 204.87597440000005, 204.87597440000005, 204.87630952727278, 204.8766446545455, 204.87697978181822, 204.87731490909096, 204.87731490909096, 204.8776500363637, 204.8776500363637, 204.8779851636364, 204.8779851636364, 204.8779851636364, 204.8779851636364, 204.87832029090913, 204.87865541818186, 204.87865541818186, 204.87865541818186, 204.8789905454546, 204.8789905454546, 204.8789905454546, 204.8789905454546, 204.8793256727273, 204.8793256727273, 204.87966080000004, 204.87999592727277, 204.87999592727277, 204.87999592727277, 204.8803310545455, 204.8803310545455, 204.8806661818182, 204.8806661818182, 204.8806661818182, 204.8806661818182, 204.8806661818182, 204.8806661818182, 204.88100130909095, 204.88100130909095, 204.88100130909095, 204.88100130909095, 204.88100130909095, 204.88100130909095, 204.88100130909095, 204.88133643636368, 204.88133643636368, 204.88133643636368, 204.88133643636368, 204.88133643636368, 204.88133643636368, 204.88803898181823, 204.89407127272733, 204.90781149090913, 
204.90948712727277, 204.91317352727276, 204.9171950545455, 204.9185355636364, 204.9198760727273, 204.9232273454546, 204.92389760000003, 204.92389760000003, 204.9245678545455, 204.92624349090914, 204.92657861818185, 204.92691374545458, 204.92724887272732, 204.92892450909096, 204.92925963636367, 204.92925963636367, 204.9295947636364, 204.92992989090914, 204.93026501818187, 204.93127040000005, 204.93160552727278, 204.93194065454549, 204.9332811636364, 204.93529192727277, 204.93529192727277, 204.93529192727277, 204.9356270545455, 204.9356270545455, 204.9356270545455, 204.93629730909095, 204.93629730909095, 204.93663243636368, 204.93763781818186, 204.9379729454546, 204.9393134545455, 204.93964858181823, 204.93964858181823, 204.93964858181823, 204.93998370909097, 204.94098909090914, 204.98321512727279, 204.9982958545455, 205.0009768727273, 205.0056686545455, 205.00868480000005, 205.00901992727276, 205.02376552727276, 205.02443578181823, 205.0267816727273, 205.02979781818186, 205.03046807272733, 205.03180858181824, 205.03717061818188, 205.03784087272732, 205.03918138181822, 205.04052189090913, 205.04353803636369, 205.04387316363642, 205.18730763636367, 205.18965352727278, 205.21244218181823, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 
307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.60124101818184, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6015761454546, 409.6025815272728, 409.6025815272728, 409.6025815272728, 409.6025815272728, 409.6029166545455, 409.6029166545455, 409.60492741818183, 409.6052625454546, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0012410181818, 512.0025815272726, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4015761454544, 716.8009058909089, 716.8009058909089, 819.2009058909089]
avgArr7 = [204.84581294545458, 205.25031156363642, 307.2005707636364, 307.20090589090916, 307.20124101818186, 307.20124101818186, 307.2015761454546, 307.2015761454546, 307.20191127272733, 307.20191127272733, 307.20224640000004, 307.2025815272728, 307.2029166545455, 307.2032517818182, 307.2032517818182, 307.2032517818182, 307.203586909091, 307.203586909091, 307.2039220363637, 307.2039220363637, 307.2039220363637, 307.2039220363637, 307.2039220363637, 307.2039220363637, 307.2039220363637, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20425716363644, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.2052625454546, 307.2059328, 307.2062679272728, 307.2066030545455, 307.2066030545455, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20727330909097, 307.20760843636367, 307.20760843636367, 307.20760843636367, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20794356363643, 307.20827869090914, 307.20827869090914, 307.20827869090914, 307.20827869090914, 307.20827869090914, 307.20827869090914, 307.20827869090914, 307.20861381818185, 307.20861381818185, 307.20861381818185, 307.20861381818185, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2089489454546, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2092840727273, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2096192000001, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 307.2099543272728, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 
409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60124101818184, 409.60124101818184, 409.6015761454546, 409.6015761454546, 409.6019112727273, 409.6022464, 409.6025815272728, 409.60358690909095, 409.60358690909095, 409.60392203636366, 409.6042571636364, 409.6042571636364, 409.6042571636364, 409.6045922909091, 409.6079435636364, 409.6079435636364, 409.6086138181818, 409.6092840727273, 409.6092840727273, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0005707636362, 512.0005707636362, 512.0005707636362, 
512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0012410181818, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0015761454545, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0019112727272, 512.0032517818181, 512.0032517818181, 512.0035869090908, 512.0035869090908, 512.0035869090908, 512.0039220363635, 512.0042571636362, 512.0042571636362, 512.0042571636362, 512.0089489454544, 512.0099543272726, 614.4002356363635, 614.4002356363635, 614.4002356363635, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4005707636362, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 716.8002356363635, 716.8005707636362, 716.8005707636362, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 716.8009058909089, 819.2009058909089, 819.2009058909089]
avgArr9 = [102.41900276363637, 102.4193378909091, 204.80023563636368, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80057076363641, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 204.80090589090915, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2002356363637, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.2005707636364, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20090589090916, 307.20124101818186, 307.20124101818186, 307.20124101818186, 307.20124101818186, 307.20124101818186, 307.20124101818186, 307.20124101818186, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.2015761454546, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20191127272733, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.20224640000004, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 
307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2025815272728, 307.2029166545455, 307.2029166545455, 307.203586909091, 307.20425716363644, 307.20425716363644, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20459229090915, 307.20492741818185, 307.20492741818185, 307.20492741818185, 307.20492741818185, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2052625454546, 307.2055976727273, 307.2055976727273, 307.2055976727273, 307.2055976727273, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2059328, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2062679272728, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.2066030545455, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20693818181826, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.20727330909097, 307.2116299636364, 307.2116299636364, 307.2123002181819, 307.2139758545455, 307.2139758545455, 307.2139758545455, 307.2139758545455, 307.21431098181824, 307.21464610909095, 307.2149812363637, 307.2156514909091, 307.2156514909091, 307.2156514909091, 307.2159866181819, 307.2159866181819, 307.2159866181819, 307.2163217454546, 307.2166568727273, 307.21699200000006, 307.21699200000006, 307.21699200000006, 307.21732712727277, 307.21732712727277, 307.21732712727277, 307.21766225454553, 307.21766225454553, 307.21766225454553, 307.21799738181824, 307.21799738181824, 307.21799738181824, 307.21833250909094, 307.21833250909094, 307.21833250909094, 307.2186676363637, 307.2186676363637, 307.2186676363637, 307.2190027636364, 307.2190027636364, 307.2190027636364, 307.2190027636364, 307.2190027636364, 307.2193378909091, 307.2193378909091, 307.2193378909091, 307.2193378909091, 307.2193378909091, 307.2193378909091, 307.2193378909091, 307.2193378909091, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60023563636366, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 
409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60057076363637, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60090589090913, 409.60124101818184, 409.6022464, 409.6025815272728, 409.6042571636364, 409.6045922909091, 409.60727330909094, 512.0002356363635, 512.0002356363635, 512.0002356363635, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0005707636362, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0009058909089, 512.0019112727272, 512.0069381818181, 512.0069381818181, 614.4005707636362, 614.4009058909089, 614.4009058909089, 614.4009058909089, 614.4009058909089, 716.8005707636362, 716.8005707636362, 716.8005707636362, 716.8009058909089, 716.8009058909089, 716.8009058909089]
a0 = np.array(avgArr0)
a1 = np.array(avgArr1)
a3 = np.array(avgArr3)
a5 = np.array(avgArr5)
a7 = np.array(avgArr7)
a9 = np.array(avgArr9)
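
# Overlay one distribution (histogram + kernel density estimate) per
# series on shared axes. Note that sns.distplot is deprecated in newer
# seaborn releases in favor of histplot/displot.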
sns.set(style="white", palette="muted", color_codes=True)
f, axes = plt.subplots(figsize=(10, 10))
axlabel = 'Maximum time, ms'
sns.distplot(a0, color="#4285f4", ax=axes, axlabel=axlabel, label='0')
sns.distplot(a1, color="#ea4335", ax=axes, axlabel=axlabel, label='0.1')
sns.distplot(a3, color="#fbbc04", ax=axes, axlabel=axlabel, label='0.3')
sns.distplot(a5, color="#34a853", ax=axes, axlabel=axlabel, label='0.5')
sns.distplot(a7, color="#ff6d01", ax=axes, axlabel=axlabel, label='0.7')
sns.distplot(a9, color="#46bdc6", ax=axes, axlabel=axlabel, label='0.9')
axes.legend()
plt.tight_layout()
plt.show()
| 1,225
| 9,827
| 0.842371
| 5,347
| 51,450
| 8.105106
| 0.07911
| 0.089044
| 0.104758
| 0.17495
| 0.887812
| 0.851308
| 0.843463
| 0.823688
| 0.818104
| 0.804444
| 0
| 0.876708
| 0.052556
| 51,450
| 42
| 9,828
| 1,225
| 0.01235
| 0.000991
| 0
| 0
| 0
| 0
| 0.001751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107143
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
82db5cd044092b5940f86d2db38a99a403ba2b99
| 9,764
|
py
|
Python
|
tests/live/test_custom_data.py
|
denibertovic/stormpath-sdk-python
|
e594a1bb48de3fa8eff26558bf4f72bb056e9d00
|
[
"Apache-2.0"
] | null | null | null |
tests/live/test_custom_data.py
|
denibertovic/stormpath-sdk-python
|
e594a1bb48de3fa8eff26558bf4f72bb056e9d00
|
[
"Apache-2.0"
] | null | null | null |
tests/live/test_custom_data.py
|
denibertovic/stormpath-sdk-python
|
e594a1bb48de3fa8eff26558bf4f72bb056e9d00
|
[
"Apache-2.0"
] | null | null | null |
from stormpath.error import Error
from .base import SingleApplicationBase, AccountBase
CUSTOM_DATA = {
'foo': 'F00!',
'foo_val': 1,
'fooCamelCase': True,
'list_of_foo': [
'a', 1, False, {
'bar': 1,
'bar_val': 'value of bar',
'barCamelCase': True,
'subBar': {
'sub_bar_name': 'Baz',
'subBarCamel': 'Quux'
}
}
]
}
class CustomDataTest(SingleApplicationBase):
def setUp(self):
super(CustomDataTest, self).setUp()
self.custom_data_resources = {
'applications': self.client.applications,
'directories': self.client.directories
}
class TestAccountCustomData(AccountBase):
def test_account_creation_with_custom_data(self):
_, acc = self.create_account(self.app.accounts,
custom_data=CUSTOM_DATA)
acc = self.app.accounts.get(acc.href)
self.assertEqual(CUSTOM_DATA, dict(acc.custom_data))
def test_custom_data_behaves_as_dict(self):
_, acc = self.create_account(self.app.accounts,
custom_data=CUSTOM_DATA)
self.assertEqual(
set(CUSTOM_DATA.keys()),
set(acc.custom_data.keys()))
self.assertEqual(
len(CUSTOM_DATA.values()),
len(acc.custom_data.values()))
self.assertEqual(
len(CUSTOM_DATA.items()),
len(acc.custom_data.items()))
self.assertEqual(set(CUSTOM_DATA), set(acc.custom_data))
self.assertEqual(acc.custom_data['foo'], CUSTOM_DATA['foo'])
self.assertEqual(acc.custom_data.get('foo'), CUSTOM_DATA['foo'])
self.assertEqual(acc.custom_data.get('nonexistent', 42), 42)
def test_custom_data_modification(self):
name, acc = self.create_account(self.app.accounts)
self.assertEqual(dict(acc.custom_data), {})
acc.custom_data['foo'] = 'F00!'
acc.custom_data['bar_value'] = 1
acc.custom_data['bazCamelCase'] = {'a': 1}
acc.save()
acc = self.app.accounts.get(acc.href)
self.assertEqual(acc.custom_data['foo'], 'F00!')
self.assertEqual(acc.custom_data['bar_value'], 1)
self.assertEqual(acc.custom_data['bazCamelCase']['a'], 1)
with self.assertRaises(KeyError):
acc.custom_data['href'] = 'whatever'
with self.assertRaises(KeyError):
acc.custom_data['-foo'] = 'whatever'
acc.custom_data['foo'] = 'Not Foo anymore!'
del acc.custom_data['bar_value']
acc.custom_data.save()
acc = self.app.accounts.get(acc.href)
self.assertEqual(acc.custom_data['foo'], 'Not Foo anymore!')
self.assertFalse('bar_value' in acc.custom_data)
class TestApplicationAndDirectoryCustomData(CustomDataTest):
def test_creation_with_custom_data(self):
for e in self.custom_data_resources.values():
res = e.create({'name': self.get_random_name(),
'custom_data': CUSTOM_DATA})
self.deletes.append(res)
res = e.get(res.href)
self.assertEqual(CUSTOM_DATA, dict(res.custom_data))
def test_custom_data_behaves_as_dict(self):
for e in self.custom_data_resources.values():
res = e.create({'name': self.get_random_name(),
'custom_data': CUSTOM_DATA})
self.deletes.append(res)
self.assertEqual(
set(CUSTOM_DATA.keys()),
set(res.custom_data.keys()))
self.assertEqual(
len(CUSTOM_DATA.values()),
len(res.custom_data.values()))
self.assertEqual(
len(CUSTOM_DATA.items()),
len(res.custom_data.items()))
self.assertEqual(set(CUSTOM_DATA), set(res.custom_data))
self.assertEqual(res.custom_data['foo'], CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('foo'), CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('nonexistent', 42), 42)
def test_custom_data_modification(self):
for e in self.custom_data_resources.values():
res = e.create({'name': self.get_random_name()})
self.deletes.append(res)
self.assertEqual(dict(res.custom_data), {})
res.custom_data['foo'] = 'F00!'
res.custom_data['bar_value'] = 1
res.custom_data['bazCamelCase'] = {'a': 1}
res.save()
res = e.get(res.href)
self.assertEqual(res.custom_data['foo'], 'F00!')
self.assertEqual(res.custom_data['bar_value'], 1)
self.assertEqual(res.custom_data['bazCamelCase']['a'], 1)
with self.assertRaises(KeyError):
res.custom_data['href'] = 'whatever'
with self.assertRaises(KeyError):
res.custom_data['-foo'] = 'whatever'
res.custom_data['foo'] = 'Not Foo anymore!'
del res.custom_data['bar_value']
res.custom_data.save()
res = e.get(res.href)
self.assertEqual(res.custom_data['foo'], 'Not Foo anymore!')
self.assertFalse('bar_value' in res.custom_data)
class TestGroupCustomData(SingleApplicationBase):
def test_creation_with_custom_data(self):
res = self.app.groups.create({'name': self.get_random_name(),
'custom_data': CUSTOM_DATA})
res = self.app.groups.get(res.href)
self.assertEqual(CUSTOM_DATA, dict(res.custom_data))
def test_custom_data_behaves_as_dict(self):
res = self.app.groups.create({'name': self.get_random_name(),
'custom_data': CUSTOM_DATA})
self.assertEqual(
set(CUSTOM_DATA.keys()),
set(res.custom_data.keys()))
self.assertEqual(
len(CUSTOM_DATA.values()),
len(res.custom_data.values()))
self.assertEqual(
len(CUSTOM_DATA.items()),
len(res.custom_data.items()))
self.assertEqual(set(CUSTOM_DATA), set(res.custom_data))
self.assertEqual(res.custom_data['foo'], CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('foo'), CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('nonexistent', 42), 42)
def test_custom_data_modification(self):
res = self.app.groups.create({'name': self.get_random_name()})
self.assertEqual(dict(res.custom_data), {})
res.custom_data['foo'] = 'F00!'
res.custom_data['bar_value'] = 1
res.custom_data['bazCamelCase'] = {'a': 1}
res.save()
res = self.app.groups.get(res.href)
self.assertEqual(res.custom_data['foo'], 'F00!')
self.assertEqual(res.custom_data['bar_value'], 1)
self.assertEqual(res.custom_data['bazCamelCase']['a'], 1)
with self.assertRaises(KeyError):
res.custom_data['href'] = 'whatever'
with self.assertRaises(KeyError):
res.custom_data['-foo'] = 'whatever'
res.custom_data['foo'] = 'Not Foo anymore!'
del res.custom_data['bar_value']
res.custom_data.save()
res = self.app.groups.get(res.href)
self.assertEqual(res.custom_data['foo'], 'Not Foo anymore!')
self.assertFalse('bar_value' in res.custom_data)
class TestTenantCustomData(SingleApplicationBase):
def setUp(self):
super(TestTenantCustomData, self).setUp()
self.client.tenant.custom_data.delete()
def tearDown(self):
super(TestTenantCustomData, self).tearDown()
self.client.tenant.custom_data.delete()
def test_tenant_has_empty_custom_data(self):
self.assertEqual({}, dict(self.client.tenant.custom_data))
def test_tenant_with_custom_data(self):
self.client.tenant.custom_data['testCamelCase'] = 'TEST'
self.client.tenant.save()
self.assertEqual({'testCamelCase': 'TEST'}, dict(self.client.tenant.custom_data))
def test_custom_data_behaves_as_dict(self):
res = self.client.tenant
for key in CUSTOM_DATA.keys():
res.custom_data[key] = CUSTOM_DATA[key]
self.assertEqual(
set(CUSTOM_DATA.keys()),
set(res.custom_data.keys()))
self.assertEqual(
len(CUSTOM_DATA.values()),
len(res.custom_data.values()))
self.assertEqual(
len(CUSTOM_DATA.items()),
len(res.custom_data.items()))
self.assertEqual(set(CUSTOM_DATA), set(res.custom_data))
self.assertEqual(res.custom_data['foo'], CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('foo'), CUSTOM_DATA['foo'])
self.assertEqual(res.custom_data.get('nonexistent', 42), 42)
def test_custom_data_modification(self):
res = self.client.tenant
self.assertEqual(dict(res.custom_data), {})
for key in CUSTOM_DATA.keys():
res.custom_data[key] = CUSTOM_DATA[key]
res.custom_data['foo'] = 'F00!'
res.custom_data['bar_value'] = 1
res.custom_data['bazCamelCase'] = {'a': 1}
res.save()
self.assertEqual(res.custom_data['foo'], 'F00!')
self.assertEqual(res.custom_data['bar_value'], 1)
self.assertEqual(res.custom_data['bazCamelCase']['a'], 1)
with self.assertRaises(KeyError):
res.custom_data['href'] = 'whatever'
with self.assertRaises(KeyError):
res.custom_data['-foo'] = 'whatever'
res.custom_data['foo'] = 'Not Foo anymore!'
del res.custom_data['bar_value']
res.custom_data.save()
self.assertEqual(res.custom_data['foo'], 'Not Foo anymore!')
self.assertFalse('bar_value' in res.custom_data)
| 32.546667
| 89
| 0.607333
| 1,154
| 9,764
| 4.931542
| 0.076257
| 0.27236
| 0.153049
| 0.088561
| 0.85644
| 0.823405
| 0.796521
| 0.752065
| 0.731506
| 0.703918
| 0
| 0.007267
| 0.253073
| 9,764
| 299
| 90
| 32.655518
| 0.77307
| 0
| 0
| 0.696682
| 0
| 0
| 0.08982
| 0
| 0
| 0
| 0
| 0
| 0.308057
| 1
| 0.075829
| false
| 0
| 0.009479
| 0
| 0.109005
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81b510a811adbbee004d2f4aae5cb16638f0913c
| 124
|
py
|
Python
|
TEMPLATE/TEMPLATE/__init__.py
|
bhautikj/miniPyProjectMaker
|
db88f1d28a2930c80954416fbbbba11a557af483
|
[
"MIT"
] | null | null | null |
TEMPLATE/TEMPLATE/__init__.py
|
bhautikj/miniPyProjectMaker
|
db88f1d28a2930c80954416fbbbba11a557af483
|
[
"MIT"
] | null | null | null |
TEMPLATE/TEMPLATE/__init__.py
|
bhautikj/miniPyProjectMaker
|
db88f1d28a2930c80954416fbbbba11a557af483
|
[
"MIT"
] | null | null | null |
import TEMPLATE.core.base
def DummySpit():
return "BLARGH"
def DummySpitBase():
return TEMPLATE.core.base.DummySpit()
| 15.5
| 39
| 0.75
| 15
| 124
| 6.2
| 0.6
| 0.258065
| 0.344086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 124
| 7
| 40
| 17.714286
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
81e604b52fe44d62364b7dc23c56b2b0dbd76f4e
| 3,735
|
py
|
Python
|
tests/summarizer/test_rouge.py
|
doruktiktiklar/sadedegel
|
3362c4b6bf07c34634313b9eafe52e6817efec60
|
[
"MIT"
] | null | null | null |
tests/summarizer/test_rouge.py
|
doruktiktiklar/sadedegel
|
3362c4b6bf07c34634313b9eafe52e6817efec60
|
[
"MIT"
] | null | null | null |
tests/summarizer/test_rouge.py
|
doruktiktiklar/sadedegel
|
3362c4b6bf07c34634313b9eafe52e6817efec60
|
[
"MIT"
] | null | null | null |
from pytest import approx, raises
import numpy as np
import pytest
from .context import Rouge1Summarizer, Doc, tokenizer_context, SimpleTokenizer, BertTokenizer
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 4, 1 / 4, 2 / 4])),
(BertTokenizer.__name__, np.array([2 / 4, 2 / 5, 3 / 4]))])
def test_rouge1_summarizer_precision_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="precision")
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 4, 1 / 4, 2 / 4])),
(BertTokenizer.__name__, np.array([2 / 4, 2 / 5, 3 / 4]))])
def test_rouge1_summarizer_precision_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="precision")
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 8, 1 / 8, 2 / 8])),
(BertTokenizer.__name__, np.array([2 / 9, 2 / 8, 3 / 9]))])
def test_rouge1_summarizer_recall_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="recall")
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 8, 1 / 8, 2 / 8])),
(BertTokenizer.__name__, np.array([2 / 9, 2 / 8, 3 / 9]))])
def test_rouge1_summarizer_recall_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="recall")
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([0.33333333, 0.16666667, 0.33333333])),
(BertTokenizer.__name__, np.array([0.30769231, 0.30769231, 0.46153846]))])
def test_rouge1_summarizer_f1_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False)
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([0.33333333, 0.16666667, 0.33333333])),
(BertTokenizer.__name__, np.array([0.30769231, 0.30769231, 0.46153846]))])
def test_rouge1_summarizer_f1_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False)
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer", [SimpleTokenizer.__name__, BertTokenizer.__name__])
def test_rouge1_summarize_text(tokenizer):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer()
doc = Doc('ali topu tut. oya ip atla. ahmet topu at.')
assert summ(doc, k=1) == [doc.sents[2]]
def test_rouge1_summarizer_unknown_mode():
with raises(ValueError):
_ = Rouge1Summarizer('unknown')
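# A minimal reference sketch (an assumption, not sadedegel's actual implementation)
# of the ROUGE-1 arithmetic the fixtures above encode: each sentence is scored
# against the pooled tokens of the remaining sentences by clipped unigram overlap.
from collections import Counter

def _rouge1_sketch(candidate_tokens, reference_tokens, metric="f1"):
    cand, ref = Counter(candidate_tokens), Counter(reference_tokens)
    overlap = sum((cand & ref).values())  # clipped unigram matches
    precision = overlap / max(sum(cand.values()), 1)
    recall = overlap / max(sum(ref.values()), 1)
    if metric == "precision":
        return precision
    if metric == "recall":
        return recall
    return 0.0 if overlap == 0 else 2 * precision * recall / (precision + recall)

# With SimpleTokenizer, 'ali topu tut .' shares {'topu', '.'} with the 8 pooled
# tokens of the other two sentences, giving precision 2/4, recall 2/8 and
# f1 1/3, matching the parametrized score_true values above.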
| 46.111111
| 101
| 0.64739
| 444
| 3,735
| 5.175676
| 0.146396
| 0.070496
| 0.093995
| 0.041775
| 0.8651
| 0.8651
| 0.843777
| 0.843777
| 0.843777
| 0.843777
| 0
| 0.061288
| 0.226774
| 3,735
| 80
| 102
| 46.6875
| 0.734418
| 0
| 0
| 0.704918
| 0
| 0
| 0.122892
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 1
| 0.131148
| false
| 0
| 0.065574
| 0
| 0.196721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4a2f204e21126992b3c36cf78e9549332c7df73
| 4,968
|
py
|
Python
|
tests/dict/test_dict_substitution.py
|
nikitanovosibirsk/revolt
|
3659308bc0dffe6bbc866536984aed3597624213
|
[
"Apache-2.0"
] | null | null | null |
tests/dict/test_dict_substitution.py
|
nikitanovosibirsk/revolt
|
3659308bc0dffe6bbc866536984aed3597624213
|
[
"Apache-2.0"
] | 1
|
2021-12-05T11:49:18.000Z
|
2021-12-05T11:49:18.000Z
|
tests/dict/test_dict_substitution.py
|
nikitanovosibirsk/revolt
|
3659308bc0dffe6bbc866536984aed3597624213
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import sentinel
from baby_steps import given, then, when
from district42 import from_native, optional, schema
from pytest import raises
from revolt import substitute
from revolt.errors import SubstitutionError
def test_dict_no_keys_substitution():
with given:
sch = schema.dict
with when:
res = substitute(sch, {})
with then:
assert res == schema.dict({})
assert res != sch
def test_dict_invalid_value_substitution_error():
with given:
sch = schema.dict
with when, raises(Exception) as exception:
substitute(sch, [])
with then:
assert exception.type is SubstitutionError
def test_dict_keys_substitution():
with given:
sch = schema.dict
value = {
"result": {
"id": 1,
"name": "Bob"
},
}
with when:
res = substitute(sch, value)
with then:
assert res == from_native(value)
assert res != sch
def test_dict_value_substitution_error():
with given:
sch = schema.dict
with when, raises(Exception) as exception:
substitute(sch, {"val": sentinel})
with then:
assert exception.type is SubstitutionError
def test_dict_values_substitution():
with given:
sch = schema.dict({
"result": schema.dict({
"id": schema.int,
"name": schema.str,
}),
})
with when:
res = substitute(sch, {
"result": {
"id": 1,
"name": "Bob",
},
})
with then:
assert res == schema.dict({
"result": schema.dict({
"id": schema.int(1),
"name": schema.str("Bob"),
}),
})
assert res != sch
def test_dict_incorrect_value_substitution_error():
with given:
sch = schema.dict({
"id": schema.int,
})
with when, raises(Exception) as exception:
substitute(sch, {"id": "1"})
with then:
assert exception.type is SubstitutionError
def test_dict_more_keys_substitution_error():
with given:
sch = schema.dict({
"id": schema.int,
})
with when, raises(Exception) as exception:
substitute(sch, {
"id": 1,
"name": "Bob",
})
with then:
assert exception.type is SubstitutionError
def test_dict_less_keys_substitution():
with given:
sch = schema.dict({
"result": schema.dict({
"id": schema.int,
"name": schema.str,
}),
})
with when:
res = substitute(sch, {
"result": {
"id": 1,
},
})
with then:
assert res == schema.dict({
"result": schema.dict({
"id": schema.int(1),
"name": schema.str,
}),
})
assert res != sch
def test_dict_nested_less_keys_substitution():
with given:
sch = schema.dict({
"friends": schema.list(schema.dict({
"id": schema.int,
"name": schema.str,
}))
})
with when:
res = substitute(sch, {
"friends": [
{
"id": 1
}
]
})
with then:
assert res == schema.dict({
"friends": schema.list([
schema.dict({
"id": schema.int(1),
"name": schema.str,
})
])
})
assert res != sch
def test_dict_with_optional_key_substitution():
with given:
sch = schema.dict({
"id": schema.int,
optional("name"): schema.str
})
with when:
res = substitute(sch, {
"id": 1,
})
with then:
assert res == schema.dict({
"id": schema.int(1),
optional("name"): schema.str
})
assert res != sch
def test_dict_with_optional_key_override_substitution():
with given:
sch = schema.dict({
"id": schema.int,
optional("name"): schema.str
})
with when:
res = substitute(sch, {
"id": 1,
"name": "Bob"
})
with then:
assert res == schema.dict({
"id": schema.int(1),
"name": schema.str("Bob"),
})
assert res != sch
def test_dict_with_optional_key_invalid_type_substitution_error():
with given:
sch = schema.dict({
"id": schema.int,
optional("name"): schema.str
})
with when, raises(Exception) as exception:
substitute(sch, {
"id": 1,
"name": None
})
with then:
assert exception.type is SubstitutionError
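# A minimal usage sketch distilled from the cases above (not an extra test):
# substitute() refines a district42 schema with concrete values, keeps
# unsubstituted optional keys loose, and raises SubstitutionError on mismatch.
def _substitution_usage_sketch():
    sch = schema.dict({
        "id": schema.int,
        optional("name"): schema.str
    })
    refined = substitute(sch, {"id": 1})  # only "id" is pinned down
    assert refined == schema.dict({
        "id": schema.int(1),
        optional("name"): schema.str
    })
    assert refined != sch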
| 21.506494
| 66
| 0.488325
| 488
| 4,968
| 4.848361
| 0.112705
| 0.101437
| 0.065934
| 0.098901
| 0.859256
| 0.844041
| 0.804734
| 0.749789
| 0.722316
| 0.714708
| 0
| 0.005335
| 0.396337
| 4,968
| 230
| 67
| 21.6
| 0.783595
| 0
| 0
| 0.772222
| 0
| 0
| 0.038849
| 0
| 0
| 0
| 0
| 0
| 0.105556
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1ef2ba4433cb7945016fba7066dd8ee2a07d3e73
| 25,878
|
py
|
Python
|
modelica_builder/modelica_parser/modelicaListener.py
|
urbanopt/modelica-builder
|
6a7c245f9e69714bec1e4fb0da2645b9993b87d5
|
[
"BSD-3-Clause"
] | 8
|
2020-09-24T17:18:12.000Z
|
2022-01-19T03:43:25.000Z
|
modelica_builder/modelica_parser/modelicaListener.py
|
urbanopt/modelica-builder
|
6a7c245f9e69714bec1e4fb0da2645b9993b87d5
|
[
"BSD-3-Clause"
] | 25
|
2020-04-27T14:38:59.000Z
|
2022-03-14T18:52:44.000Z
|
modelica_builder/modelica_parser/modelicaListener.py
|
urbanopt/modelica-builder
|
6a7c245f9e69714bec1e4fb0da2645b9993b87d5
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated from /var/antlrResult/modelica.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .modelicaParser import modelicaParser
else:
from modelicaParser import modelicaParser
# This class defines a complete listener for a parse tree produced by modelicaParser.
class modelicaListener(ParseTreeListener):
# Enter a parse tree produced by modelicaParser#stored_definition.
def enterStored_definition(self, ctx:modelicaParser.Stored_definitionContext):
pass
# Exit a parse tree produced by modelicaParser#stored_definition.
def exitStored_definition(self, ctx:modelicaParser.Stored_definitionContext):
pass
# Enter a parse tree produced by modelicaParser#within_statement.
def enterWithin_statement(self, ctx:modelicaParser.Within_statementContext):
pass
# Exit a parse tree produced by modelicaParser#within_statement.
def exitWithin_statement(self, ctx:modelicaParser.Within_statementContext):
pass
# Enter a parse tree produced by modelicaParser#class_definition.
def enterClass_definition(self, ctx:modelicaParser.Class_definitionContext):
pass
# Exit a parse tree produced by modelicaParser#class_definition.
def exitClass_definition(self, ctx:modelicaParser.Class_definitionContext):
pass
# Enter a parse tree produced by modelicaParser#class_specifier.
def enterClass_specifier(self, ctx:modelicaParser.Class_specifierContext):
pass
# Exit a parse tree produced by modelicaParser#class_specifier.
def exitClass_specifier(self, ctx:modelicaParser.Class_specifierContext):
pass
# Enter a parse tree produced by modelicaParser#class_prefixes.
def enterClass_prefixes(self, ctx:modelicaParser.Class_prefixesContext):
pass
# Exit a parse tree produced by modelicaParser#class_prefixes.
def exitClass_prefixes(self, ctx:modelicaParser.Class_prefixesContext):
pass
# Enter a parse tree produced by modelicaParser#long_class_specifier.
def enterLong_class_specifier(self, ctx:modelicaParser.Long_class_specifierContext):
pass
# Exit a parse tree produced by modelicaParser#long_class_specifier.
def exitLong_class_specifier(self, ctx:modelicaParser.Long_class_specifierContext):
pass
# Enter a parse tree produced by modelicaParser#short_class_specifier.
def enterShort_class_specifier(self, ctx:modelicaParser.Short_class_specifierContext):
pass
# Exit a parse tree produced by modelicaParser#short_class_specifier.
def exitShort_class_specifier(self, ctx:modelicaParser.Short_class_specifierContext):
pass
# Enter a parse tree produced by modelicaParser#der_class_specifier.
def enterDer_class_specifier(self, ctx:modelicaParser.Der_class_specifierContext):
pass
# Exit a parse tree produced by modelicaParser#der_class_specifier.
def exitDer_class_specifier(self, ctx:modelicaParser.Der_class_specifierContext):
pass
# Enter a parse tree produced by modelicaParser#base_prefix.
def enterBase_prefix(self, ctx:modelicaParser.Base_prefixContext):
pass
# Exit a parse tree produced by modelicaParser#base_prefix.
def exitBase_prefix(self, ctx:modelicaParser.Base_prefixContext):
pass
# Enter a parse tree produced by modelicaParser#enum_list.
def enterEnum_list(self, ctx:modelicaParser.Enum_listContext):
pass
# Exit a parse tree produced by modelicaParser#enum_list.
def exitEnum_list(self, ctx:modelicaParser.Enum_listContext):
pass
# Enter a parse tree produced by modelicaParser#enumeration_literal.
def enterEnumeration_literal(self, ctx:modelicaParser.Enumeration_literalContext):
pass
# Exit a parse tree produced by modelicaParser#enumeration_literal.
def exitEnumeration_literal(self, ctx:modelicaParser.Enumeration_literalContext):
pass
# Enter a parse tree produced by modelicaParser#composition.
def enterComposition(self, ctx:modelicaParser.CompositionContext):
pass
# Exit a parse tree produced by modelicaParser#composition.
def exitComposition(self, ctx:modelicaParser.CompositionContext):
pass
# Enter a parse tree produced by modelicaParser#language_specification.
def enterLanguage_specification(self, ctx:modelicaParser.Language_specificationContext):
pass
# Exit a parse tree produced by modelicaParser#language_specification.
def exitLanguage_specification(self, ctx:modelicaParser.Language_specificationContext):
pass
# Enter a parse tree produced by modelicaParser#external_function_call.
def enterExternal_function_call(self, ctx:modelicaParser.External_function_callContext):
pass
# Exit a parse tree produced by modelicaParser#external_function_call.
def exitExternal_function_call(self, ctx:modelicaParser.External_function_callContext):
pass
# Enter a parse tree produced by modelicaParser#element_list.
def enterElement_list(self, ctx:modelicaParser.Element_listContext):
pass
# Exit a parse tree produced by modelicaParser#element_list.
def exitElement_list(self, ctx:modelicaParser.Element_listContext):
pass
# Enter a parse tree produced by modelicaParser#element.
def enterElement(self, ctx:modelicaParser.ElementContext):
pass
# Exit a parse tree produced by modelicaParser#element.
def exitElement(self, ctx:modelicaParser.ElementContext):
pass
# Enter a parse tree produced by modelicaParser#import_clause.
def enterImport_clause(self, ctx:modelicaParser.Import_clauseContext):
pass
# Exit a parse tree produced by modelicaParser#import_clause.
def exitImport_clause(self, ctx:modelicaParser.Import_clauseContext):
pass
# Enter a parse tree produced by modelicaParser#import_list.
def enterImport_list(self, ctx:modelicaParser.Import_listContext):
pass
# Exit a parse tree produced by modelicaParser#import_list.
def exitImport_list(self, ctx:modelicaParser.Import_listContext):
pass
# Enter a parse tree produced by modelicaParser#extends_clause.
def enterExtends_clause(self, ctx:modelicaParser.Extends_clauseContext):
pass
# Exit a parse tree produced by modelicaParser#extends_clause.
def exitExtends_clause(self, ctx:modelicaParser.Extends_clauseContext):
pass
# Enter a parse tree produced by modelicaParser#constraining_clause.
def enterConstraining_clause(self, ctx:modelicaParser.Constraining_clauseContext):
pass
# Exit a parse tree produced by modelicaParser#constraining_clause.
def exitConstraining_clause(self, ctx:modelicaParser.Constraining_clauseContext):
pass
# Enter a parse tree produced by modelicaParser#component_clause.
def enterComponent_clause(self, ctx:modelicaParser.Component_clauseContext):
pass
# Exit a parse tree produced by modelicaParser#component_clause.
def exitComponent_clause(self, ctx:modelicaParser.Component_clauseContext):
pass
# Enter a parse tree produced by modelicaParser#type_prefix.
def enterType_prefix(self, ctx:modelicaParser.Type_prefixContext):
pass
# Exit a parse tree produced by modelicaParser#type_prefix.
def exitType_prefix(self, ctx:modelicaParser.Type_prefixContext):
pass
# Enter a parse tree produced by modelicaParser#type_specifier.
def enterType_specifier(self, ctx:modelicaParser.Type_specifierContext):
pass
# Exit a parse tree produced by modelicaParser#type_specifier.
def exitType_specifier(self, ctx:modelicaParser.Type_specifierContext):
pass
# Enter a parse tree produced by modelicaParser#component_list.
def enterComponent_list(self, ctx:modelicaParser.Component_listContext):
pass
# Exit a parse tree produced by modelicaParser#component_list.
def exitComponent_list(self, ctx:modelicaParser.Component_listContext):
pass
# Enter a parse tree produced by modelicaParser#component_declaration.
def enterComponent_declaration(self, ctx:modelicaParser.Component_declarationContext):
pass
# Exit a parse tree produced by modelicaParser#component_declaration.
def exitComponent_declaration(self, ctx:modelicaParser.Component_declarationContext):
pass
# Enter a parse tree produced by modelicaParser#condition_attribute.
def enterCondition_attribute(self, ctx:modelicaParser.Condition_attributeContext):
pass
# Exit a parse tree produced by modelicaParser#condition_attribute.
def exitCondition_attribute(self, ctx:modelicaParser.Condition_attributeContext):
pass
# Enter a parse tree produced by modelicaParser#declaration.
def enterDeclaration(self, ctx:modelicaParser.DeclarationContext):
pass
# Exit a parse tree produced by modelicaParser#declaration.
def exitDeclaration(self, ctx:modelicaParser.DeclarationContext):
pass
# Enter a parse tree produced by modelicaParser#modification.
def enterModification(self, ctx:modelicaParser.ModificationContext):
pass
# Exit a parse tree produced by modelicaParser#modification.
def exitModification(self, ctx:modelicaParser.ModificationContext):
pass
# Enter a parse tree produced by modelicaParser#class_modification.
def enterClass_modification(self, ctx:modelicaParser.Class_modificationContext):
pass
# Exit a parse tree produced by modelicaParser#class_modification.
def exitClass_modification(self, ctx:modelicaParser.Class_modificationContext):
pass
# Enter a parse tree produced by modelicaParser#argument_list.
def enterArgument_list(self, ctx:modelicaParser.Argument_listContext):
pass
# Exit a parse tree produced by modelicaParser#argument_list.
def exitArgument_list(self, ctx:modelicaParser.Argument_listContext):
pass
# Enter a parse tree produced by modelicaParser#argument.
def enterArgument(self, ctx:modelicaParser.ArgumentContext):
pass
# Exit a parse tree produced by modelicaParser#argument.
def exitArgument(self, ctx:modelicaParser.ArgumentContext):
pass
# Enter a parse tree produced by modelicaParser#element_modification_or_replaceable.
def enterElement_modification_or_replaceable(self, ctx:modelicaParser.Element_modification_or_replaceableContext):
pass
# Exit a parse tree produced by modelicaParser#element_modification_or_replaceable.
def exitElement_modification_or_replaceable(self, ctx:modelicaParser.Element_modification_or_replaceableContext):
pass
# Enter a parse tree produced by modelicaParser#element_modification.
def enterElement_modification(self, ctx:modelicaParser.Element_modificationContext):
pass
# Exit a parse tree produced by modelicaParser#element_modification.
def exitElement_modification(self, ctx:modelicaParser.Element_modificationContext):
pass
# Enter a parse tree produced by modelicaParser#element_redeclaration.
def enterElement_redeclaration(self, ctx:modelicaParser.Element_redeclarationContext):
pass
# Exit a parse tree produced by modelicaParser#element_redeclaration.
def exitElement_redeclaration(self, ctx:modelicaParser.Element_redeclarationContext):
pass
# Enter a parse tree produced by modelicaParser#element_replaceable.
def enterElement_replaceable(self, ctx:modelicaParser.Element_replaceableContext):
pass
# Exit a parse tree produced by modelicaParser#element_replaceable.
def exitElement_replaceable(self, ctx:modelicaParser.Element_replaceableContext):
pass
# Enter a parse tree produced by modelicaParser#component_clause1.
def enterComponent_clause1(self, ctx:modelicaParser.Component_clause1Context):
pass
# Exit a parse tree produced by modelicaParser#component_clause1.
def exitComponent_clause1(self, ctx:modelicaParser.Component_clause1Context):
pass
# Enter a parse tree produced by modelicaParser#component_declaration1.
def enterComponent_declaration1(self, ctx:modelicaParser.Component_declaration1Context):
pass
# Exit a parse tree produced by modelicaParser#component_declaration1.
def exitComponent_declaration1(self, ctx:modelicaParser.Component_declaration1Context):
pass
# Enter a parse tree produced by modelicaParser#short_class_definition.
def enterShort_class_definition(self, ctx:modelicaParser.Short_class_definitionContext):
pass
# Exit a parse tree produced by modelicaParser#short_class_definition.
def exitShort_class_definition(self, ctx:modelicaParser.Short_class_definitionContext):
pass
# Enter a parse tree produced by modelicaParser#equation_section.
def enterEquation_section(self, ctx:modelicaParser.Equation_sectionContext):
pass
# Exit a parse tree produced by modelicaParser#equation_section.
def exitEquation_section(self, ctx:modelicaParser.Equation_sectionContext):
pass
# Enter a parse tree produced by modelicaParser#algorithm_section.
def enterAlgorithm_section(self, ctx:modelicaParser.Algorithm_sectionContext):
pass
# Exit a parse tree produced by modelicaParser#algorithm_section.
def exitAlgorithm_section(self, ctx:modelicaParser.Algorithm_sectionContext):
pass
# Enter a parse tree produced by modelicaParser#equation.
def enterEquation(self, ctx:modelicaParser.EquationContext):
pass
# Exit a parse tree produced by modelicaParser#equation.
def exitEquation(self, ctx:modelicaParser.EquationContext):
pass
# Enter a parse tree produced by modelicaParser#statement.
def enterStatement(self, ctx:modelicaParser.StatementContext):
pass
# Exit a parse tree produced by modelicaParser#statement.
def exitStatement(self, ctx:modelicaParser.StatementContext):
pass
# Enter a parse tree produced by modelicaParser#if_equation.
def enterIf_equation(self, ctx:modelicaParser.If_equationContext):
pass
# Exit a parse tree produced by modelicaParser#if_equation.
def exitIf_equation(self, ctx:modelicaParser.If_equationContext):
pass
# Enter a parse tree produced by modelicaParser#if_statement.
def enterIf_statement(self, ctx:modelicaParser.If_statementContext):
pass
# Exit a parse tree produced by modelicaParser#if_statement.
def exitIf_statement(self, ctx:modelicaParser.If_statementContext):
pass
# Enter a parse tree produced by modelicaParser#for_equation.
def enterFor_equation(self, ctx:modelicaParser.For_equationContext):
pass
# Exit a parse tree produced by modelicaParser#for_equation.
def exitFor_equation(self, ctx:modelicaParser.For_equationContext):
pass
# Enter a parse tree produced by modelicaParser#for_statement.
def enterFor_statement(self, ctx:modelicaParser.For_statementContext):
pass
# Exit a parse tree produced by modelicaParser#for_statement.
def exitFor_statement(self, ctx:modelicaParser.For_statementContext):
pass
# Enter a parse tree produced by modelicaParser#for_indices.
def enterFor_indices(self, ctx:modelicaParser.For_indicesContext):
pass
# Exit a parse tree produced by modelicaParser#for_indices.
def exitFor_indices(self, ctx:modelicaParser.For_indicesContext):
pass
# Enter a parse tree produced by modelicaParser#for_index.
def enterFor_index(self, ctx:modelicaParser.For_indexContext):
pass
# Exit a parse tree produced by modelicaParser#for_index.
def exitFor_index(self, ctx:modelicaParser.For_indexContext):
pass
# Enter a parse tree produced by modelicaParser#while_statement.
def enterWhile_statement(self, ctx:modelicaParser.While_statementContext):
pass
# Exit a parse tree produced by modelicaParser#while_statement.
def exitWhile_statement(self, ctx:modelicaParser.While_statementContext):
pass
# Enter a parse tree produced by modelicaParser#when_equation.
def enterWhen_equation(self, ctx:modelicaParser.When_equationContext):
pass
# Exit a parse tree produced by modelicaParser#when_equation.
def exitWhen_equation(self, ctx:modelicaParser.When_equationContext):
pass
# Enter a parse tree produced by modelicaParser#when_statement.
def enterWhen_statement(self, ctx:modelicaParser.When_statementContext):
pass
# Exit a parse tree produced by modelicaParser#when_statement.
def exitWhen_statement(self, ctx:modelicaParser.When_statementContext):
pass
# Enter a parse tree produced by modelicaParser#connect_clause.
def enterConnect_clause(self, ctx:modelicaParser.Connect_clauseContext):
pass
# Exit a parse tree produced by modelicaParser#connect_clause.
def exitConnect_clause(self, ctx:modelicaParser.Connect_clauseContext):
pass
# Enter a parse tree produced by modelicaParser#expression.
def enterExpression(self, ctx:modelicaParser.ExpressionContext):
pass
# Exit a parse tree produced by modelicaParser#expression.
def exitExpression(self, ctx:modelicaParser.ExpressionContext):
pass
# Enter a parse tree produced by modelicaParser#simple_expression.
def enterSimple_expression(self, ctx:modelicaParser.Simple_expressionContext):
pass
# Exit a parse tree produced by modelicaParser#simple_expression.
def exitSimple_expression(self, ctx:modelicaParser.Simple_expressionContext):
pass
# Enter a parse tree produced by modelicaParser#logical_expression.
def enterLogical_expression(self, ctx:modelicaParser.Logical_expressionContext):
pass
# Exit a parse tree produced by modelicaParser#logical_expression.
def exitLogical_expression(self, ctx:modelicaParser.Logical_expressionContext):
pass
# Enter a parse tree produced by modelicaParser#logical_term.
def enterLogical_term(self, ctx:modelicaParser.Logical_termContext):
pass
# Exit a parse tree produced by modelicaParser#logical_term.
def exitLogical_term(self, ctx:modelicaParser.Logical_termContext):
pass
# Enter a parse tree produced by modelicaParser#logical_factor.
def enterLogical_factor(self, ctx:modelicaParser.Logical_factorContext):
pass
# Exit a parse tree produced by modelicaParser#logical_factor.
def exitLogical_factor(self, ctx:modelicaParser.Logical_factorContext):
pass
# Enter a parse tree produced by modelicaParser#relation.
def enterRelation(self, ctx:modelicaParser.RelationContext):
pass
# Exit a parse tree produced by modelicaParser#relation.
def exitRelation(self, ctx:modelicaParser.RelationContext):
pass
# Enter a parse tree produced by modelicaParser#rel_op.
def enterRel_op(self, ctx:modelicaParser.Rel_opContext):
pass
# Exit a parse tree produced by modelicaParser#rel_op.
def exitRel_op(self, ctx:modelicaParser.Rel_opContext):
pass
# Enter a parse tree produced by modelicaParser#arithmetic_expression.
def enterArithmetic_expression(self, ctx:modelicaParser.Arithmetic_expressionContext):
pass
# Exit a parse tree produced by modelicaParser#arithmetic_expression.
def exitArithmetic_expression(self, ctx:modelicaParser.Arithmetic_expressionContext):
pass
# Enter a parse tree produced by modelicaParser#add_op.
def enterAdd_op(self, ctx:modelicaParser.Add_opContext):
pass
# Exit a parse tree produced by modelicaParser#add_op.
def exitAdd_op(self, ctx:modelicaParser.Add_opContext):
pass
# Enter a parse tree produced by modelicaParser#term.
def enterTerm(self, ctx:modelicaParser.TermContext):
pass
# Exit a parse tree produced by modelicaParser#term.
def exitTerm(self, ctx:modelicaParser.TermContext):
pass
# Enter a parse tree produced by modelicaParser#mul_op.
def enterMul_op(self, ctx:modelicaParser.Mul_opContext):
pass
# Exit a parse tree produced by modelicaParser#mul_op.
def exitMul_op(self, ctx:modelicaParser.Mul_opContext):
pass
# Enter a parse tree produced by modelicaParser#factor.
def enterFactor(self, ctx:modelicaParser.FactorContext):
pass
# Exit a parse tree produced by modelicaParser#factor.
def exitFactor(self, ctx:modelicaParser.FactorContext):
pass
# Enter a parse tree produced by modelicaParser#primary.
def enterPrimary(self, ctx:modelicaParser.PrimaryContext):
pass
# Exit a parse tree produced by modelicaParser#primary.
def exitPrimary(self, ctx:modelicaParser.PrimaryContext):
pass
# Enter a parse tree produced by modelicaParser#name.
def enterName(self, ctx:modelicaParser.NameContext):
pass
# Exit a parse tree produced by modelicaParser#name.
def exitName(self, ctx:modelicaParser.NameContext):
pass
# Enter a parse tree produced by modelicaParser#component_reference.
def enterComponent_reference(self, ctx:modelicaParser.Component_referenceContext):
pass
# Exit a parse tree produced by modelicaParser#component_reference.
def exitComponent_reference(self, ctx:modelicaParser.Component_referenceContext):
pass
# Enter a parse tree produced by modelicaParser#function_call_args.
def enterFunction_call_args(self, ctx:modelicaParser.Function_call_argsContext):
pass
# Exit a parse tree produced by modelicaParser#function_call_args.
def exitFunction_call_args(self, ctx:modelicaParser.Function_call_argsContext):
pass
# Enter a parse tree produced by modelicaParser#function_arguments.
def enterFunction_arguments(self, ctx:modelicaParser.Function_argumentsContext):
pass
# Exit a parse tree produced by modelicaParser#function_arguments.
def exitFunction_arguments(self, ctx:modelicaParser.Function_argumentsContext):
pass
# Enter a parse tree produced by modelicaParser#named_arguments.
def enterNamed_arguments(self, ctx:modelicaParser.Named_argumentsContext):
pass
# Exit a parse tree produced by modelicaParser#named_arguments.
def exitNamed_arguments(self, ctx:modelicaParser.Named_argumentsContext):
pass
# Enter a parse tree produced by modelicaParser#named_argument.
def enterNamed_argument(self, ctx:modelicaParser.Named_argumentContext):
pass
# Exit a parse tree produced by modelicaParser#named_argument.
def exitNamed_argument(self, ctx:modelicaParser.Named_argumentContext):
pass
# Enter a parse tree produced by modelicaParser#function_argument.
def enterFunction_argument(self, ctx:modelicaParser.Function_argumentContext):
pass
# Exit a parse tree produced by modelicaParser#function_argument.
def exitFunction_argument(self, ctx:modelicaParser.Function_argumentContext):
pass
# Enter a parse tree produced by modelicaParser#output_expression_list.
def enterOutput_expression_list(self, ctx:modelicaParser.Output_expression_listContext):
pass
# Exit a parse tree produced by modelicaParser#output_expression_list.
def exitOutput_expression_list(self, ctx:modelicaParser.Output_expression_listContext):
pass
# Enter a parse tree produced by modelicaParser#expression_list.
def enterExpression_list(self, ctx:modelicaParser.Expression_listContext):
pass
# Exit a parse tree produced by modelicaParser#expression_list.
def exitExpression_list(self, ctx:modelicaParser.Expression_listContext):
pass
# Enter a parse tree produced by modelicaParser#array_subscripts.
def enterArray_subscripts(self, ctx:modelicaParser.Array_subscriptsContext):
pass
# Exit a parse tree produced by modelicaParser#array_subscripts.
def exitArray_subscripts(self, ctx:modelicaParser.Array_subscriptsContext):
pass
# Enter a parse tree produced by modelicaParser#subscript.
def enterSubscript(self, ctx:modelicaParser.SubscriptContext):
pass
# Exit a parse tree produced by modelicaParser#subscript.
def exitSubscript(self, ctx:modelicaParser.SubscriptContext):
pass
# Enter a parse tree produced by modelicaParser#comment.
def enterComment(self, ctx:modelicaParser.CommentContext):
pass
# Exit a parse tree produced by modelicaParser#comment.
def exitComment(self, ctx:modelicaParser.CommentContext):
pass
# Enter a parse tree produced by modelicaParser#string_comment.
def enterString_comment(self, ctx:modelicaParser.String_commentContext):
pass
# Exit a parse tree produced by modelicaParser#string_comment.
def exitString_comment(self, ctx:modelicaParser.String_commentContext):
pass
# Enter a parse tree produced by modelicaParser#model_annotation.
def enterModel_annotation(self, ctx:modelicaParser.Model_annotationContext):
pass
# Exit a parse tree produced by modelicaParser#model_annotation.
def exitModel_annotation(self, ctx:modelicaParser.Model_annotationContext):
pass
# Enter a parse tree produced by modelicaParser#annotation.
def enterAnnotation(self, ctx:modelicaParser.AnnotationContext):
pass
# Exit a parse tree produced by modelicaParser#annotation.
def exitAnnotation(self, ctx:modelicaParser.AnnotationContext):
pass
del modelicaParser
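# A minimal usage sketch (commented out: this module deletes modelicaParser
# above, and the lexer module name is an assumption). In a separate file,
# subclass the generated listener, override the hooks you need, and walk a
# parse tree with the standard antlr4 runtime:
#
# from antlr4 import CommonTokenStream, FileStream, ParseTreeWalker
# from modelicaLexer import modelicaLexer
# from modelicaParser import modelicaParser
# from modelicaListener import modelicaListener
#
# class ClassCollector(modelicaListener):
#     def enterClass_definition(self, ctx:modelicaParser.Class_definitionContext):
#         print(ctx.getText()[:60])  # react to each class_definition node
#
# tokens = CommonTokenStream(modelicaLexer(FileStream("Model.mo")))  # hypothetical input
# tree = modelicaParser(tokens).stored_definition()
# ParseTreeWalker.DEFAULT.walk(ClassCollector(), tree)  # fires the enter/exit hooks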
| 35.352459
| 118
| 0.759062
| 2,886
| 25,878
| 6.651767
| 0.086279
| 0.05032
| 0.083867
| 0.150961
| 0.894671
| 0.849404
| 0.849404
| 0.626296
| 0.582487
| 0.128666
| 0
| 0.000758
| 0.184597
| 25,878
| 732
| 119
| 35.352459
| 0.909009
| 0.381328
| 0
| 0.489297
| 1
| 0
| 0.000064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.489297
| false
| 0.489297
| 0.021407
| 0
| 0.513761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
1ef7017896aef2ec6d8365e478dea0289d8b94df
| 38,974
|
py
|
Python
|
test/augmentation/test_augmentation_3d.py
|
mfkiwl/kornia
|
df591b3b440de84c197f7e2ef91e1d3880e35ce1
|
[
"ECL-2.0",
"Apache-2.0"
] | 4,894
|
2019-10-24T15:51:39.000Z
|
2022-03-30T22:58:33.000Z
|
test/augmentation/test_augmentation_3d.py
|
kornia/kornia
|
df193b21e33087f129dff71c6e59eab36cdcbf99
|
[
"ECL-2.0",
"Apache-2.0"
] | 912
|
2019-10-24T16:08:42.000Z
|
2022-03-31T19:07:09.000Z
|
test/augmentation/test_augmentation_3d.py
|
mfkiwl/kornia
|
df591b3b440de84c197f7e2ef91e1d3880e35ce1
|
[
"ECL-2.0",
"Apache-2.0"
] | 557
|
2019-10-24T16:02:43.000Z
|
2022-03-28T07:33:33.000Z
|
from typing import Tuple, Union
import pytest
import torch
import torch.nn as nn
from torch.autograd import gradcheck
import kornia
import kornia.testing as utils # test utils
from kornia.augmentation import (
RandomAffine3D,
RandomCrop,
RandomCrop3D,
RandomDepthicalFlip3D,
RandomEqualize3D,
RandomHorizontalFlip3D,
RandomRotation3D,
RandomVerticalFlip3D,
)
from kornia.testing import assert_close
class TestRandomHorizontalFlip3D:
    # TODO: improve and implement more meaningful smoke tests, e.g. check for
    # consistent return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self):
f = RandomHorizontalFlip3D(0.5)
repr = "RandomHorizontalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
assert str(f) == repr
def test_random_hflip(self, device):
f = RandomHorizontalFlip3D(p=1.0, return_transform=True)
f1 = RandomHorizontalFlip3D(p=0.0, return_transform=True)
f2 = RandomHorizontalFlip3D(p=1.0)
f3 = RandomHorizontalFlip3D(p=0.0)
input = torch.tensor(
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
]
) # 2 x 3 x 4
input = input.to(device)
expected = torch.tensor(
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [2.0, 1.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [2.0, 1.0, 0.0, 0.0]],
]
) # 2 x 3 x 4
expected = expected.to(device)
expected_transform = torch.tensor(
[[-1.0, 0.0, 0.0, 3.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
) # 4 x 4
expected_transform = expected_transform.to(device)
identity = torch.tensor(
[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
) # 4 x 4
identity = identity.to(device)
assert (f(input)[0] == expected).all()
assert (f(input)[1] == expected_transform).all()
assert (f1(input)[0] == input).all()
assert (f1(input)[1] == identity).all()
assert (f2(input) == expected).all()
assert (f3(input) == input).all()
def test_batch_random_hflip(self, device):
f = RandomHorizontalFlip3D(p=1.0, return_transform=True)
f1 = RandomHorizontalFlip3D(p=0.0, return_transform=True)
input = torch.tensor([[[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]]]]]) # 1 x 1 x 1 x 3 x 3
input = input.to(device)
expected = torch.tensor([[[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]]]]]) # 1 x 1 x 1 x 3 x 3
expected = expected.to(device)
expected_transform = torch.tensor(
[[[-1.0, 0.0, 0.0, 2.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
identity = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
expected_transform = expected_transform.repeat(5, 1, 1) # 5 x 4 x 4
identity = identity.repeat(5, 1, 1) # 5 x 4 x 4
assert (f(input)[0] == expected).all()
assert (f(input)[1] == expected_transform).all()
assert (f1(input)[0] == input).all()
assert (f1(input)[1] == identity).all()
def test_same_on_batch(self, device):
f = RandomHorizontalFlip3D(p=0.5, same_on_batch=True)
input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 1, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
def test_sequential(self, device):
f = nn.Sequential(
RandomHorizontalFlip3D(p=1.0, return_transform=True), RandomHorizontalFlip3D(p=1.0, return_transform=True)
)
f1 = nn.Sequential(RandomHorizontalFlip3D(p=1.0, return_transform=True), RandomHorizontalFlip3D(p=1.0))
input = torch.tensor([[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]]]]) # 1 x 1 x 3 x 3
input = input.to(device)
expected_transform = torch.tensor(
[[[-1.0, 0.0, 0.0, 2.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
expected_transform_1 = expected_transform @ expected_transform
expected_transform_1 = expected_transform_1.to(device)
assert (f(input)[0] == input).all()
assert (f(input)[1] == expected_transform_1).all()
assert (f1(input)[0] == input).all()
assert (f1(input)[1] == expected_transform).all()
def test_gradcheck(self, device):
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(RandomHorizontalFlip3D(p=1.0), (input,), raise_exception=True)
class TestRandomVerticalFlip3D:
    # TODO: improve and implement more meaningful smoke tests, e.g. check for
    # consistent return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self):
f = RandomVerticalFlip3D(0.5)
repr = "RandomVerticalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
assert str(f) == repr
def test_random_vflip(self, device, dtype):
f = RandomVerticalFlip3D(p=1.0, return_transform=True)
f1 = RandomVerticalFlip3D(p=0.0, return_transform=True)
f2 = RandomVerticalFlip3D(p=1.0)
f3 = RandomVerticalFlip3D(p=0.0)
input = torch.tensor(
[
[
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]],
]
]
],
device=device,
dtype=dtype,
) # 1 x 1 x 2 x 3 x 3
expected = torch.tensor(
[
[
[
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
]
]
],
device=device,
dtype=dtype,
) # 1 x 1 x 2 x 3 x 3
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]],
device=device,
dtype=dtype,
        )  # 1 x 4 x 4
identity = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]],
device=device,
dtype=dtype,
) # 1 x 4 x 4
assert_close(f(input)[0], expected)
assert_close(f(input)[1], expected_transform)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], identity)
assert_close(f2(input), expected)
assert_close(f3(input), input)
def test_batch_random_vflip(self, device):
f = RandomVerticalFlip3D(p=1.0, return_transform=True)
f1 = RandomVerticalFlip3D(p=0.0, return_transform=True)
input = torch.tensor([[[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]]]]]) # 1 x 1 x 1 x 3 x 3
input = input.to(device)
expected = torch.tensor([[[[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]]]) # 1 x 1 x 1 x 3 x 3
expected = expected.to(device)
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
identity = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
expected_transform = expected_transform.repeat(5, 1, 1) # 5 x 4 x 4
identity = identity.repeat(5, 1, 1) # 5 x 4 x 4
assert_close(f(input)[0], expected)
assert_close(f(input)[1], expected_transform)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], identity)
def test_same_on_batch(self, device):
f = RandomVerticalFlip3D(p=0.5, same_on_batch=True)
input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 1, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
def test_sequential(self, device):
f = nn.Sequential(
RandomVerticalFlip3D(p=1.0, return_transform=True), RandomVerticalFlip3D(p=1.0, return_transform=True)
)
f1 = nn.Sequential(RandomVerticalFlip3D(p=1.0, return_transform=True), RandomVerticalFlip3D(p=1.0))
        input = torch.tensor([[[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 1.0]]]]])  # 1 x 1 x 1 x 3 x 3
input = input.to(device)
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
expected_transform_1 = expected_transform @ expected_transform
assert_close(f(input)[0], input)
assert_close(f(input)[1], expected_transform_1)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], expected_transform)
def test_gradcheck(self, device):
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(RandomVerticalFlip3D(p=1.0), (input,), raise_exception=True)
class TestRandomDepthicalFlip3D:
    # TODO: improve and implement more meaningful smoke tests, e.g. check for
    # consistent return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self):
f = RandomDepthicalFlip3D(0.5)
repr = "RandomDepthicalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
assert str(f) == repr
def test_random_dflip(self, device, dtype):
f = RandomDepthicalFlip3D(p=1.0, return_transform=True)
f1 = RandomDepthicalFlip3D(p=0.0, return_transform=True)
f2 = RandomDepthicalFlip3D(p=1.0)
f3 = RandomDepthicalFlip3D(p=0.0)
input = torch.tensor(
[
[
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 2.0]],
]
]
],
device=device,
dtype=dtype,
        )  # 1 x 1 x 2 x 3 x 4
expected = torch.tensor(
[
[
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 2.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
]
]
],
device=device,
dtype=dtype,
        )  # 1 x 1 x 2 x 3 x 4
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 1.0], [0.0, 0.0, 0.0, 1.0]]],
device=device,
dtype=dtype,
        )  # 1 x 4 x 4
identity = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]],
device=device,
dtype=dtype,
        )  # 1 x 4 x 4
assert_close(f(input)[0], expected)
assert_close(f(input)[1], expected_transform)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], identity)
assert_close(f2(input), expected)
assert_close(f3(input), input)
def test_batch_random_dflip(self, device):
f = RandomDepthicalFlip3D(p=1.0, return_transform=True)
f1 = RandomDepthicalFlip3D(p=0.0, return_transform=True)
input = torch.tensor(
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 2.0]],
]
) # 2 x 3 x 4
input = input.to(device)
expected = torch.tensor(
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 2.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
]
) # 2 x 3 x 4
expected = expected.to(device)
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 1.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
identity = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 2 x 3 x 4
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 2 x 3 x 4
expected_transform = expected_transform.repeat(5, 1, 1) # 5 x 4 x 4
identity = identity.repeat(5, 1, 1) # 5 x 4 x 4
assert_close(f(input)[0], expected)
assert_close(f(input)[1], expected_transform)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], identity)
def test_same_on_batch(self, device):
f = RandomDepthicalFlip3D(p=0.5, same_on_batch=True)
input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 2, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
def test_sequential(self, device):
f = nn.Sequential(
RandomDepthicalFlip3D(p=1.0, return_transform=True), RandomDepthicalFlip3D(p=1.0, return_transform=True)
)
f1 = nn.Sequential(RandomDepthicalFlip3D(p=1.0, return_transform=True), RandomDepthicalFlip3D(p=1.0))
input = torch.tensor(
[
[
[
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 2.0]],
]
]
]
        )  # 1 x 1 x 2 x 3 x 4
input = input.to(device)
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 1.0], [0.0, 0.0, 0.0, 1.0]]]
) # 1 x 4 x 4
expected_transform = expected_transform.to(device)
expected_transform_1 = expected_transform @ expected_transform
assert_close(f(input)[0], input)
assert_close(f(input)[1], expected_transform_1)
assert_close(f1(input)[0], input)
assert_close(f1(input)[1], expected_transform)
def test_gradcheck(self, device):
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(RandomDepthicalFlip3D(p=1.0), (input,), raise_exception=True)
class TestRandomRotation3D:
    torch.manual_seed(0)  # for random reproducibility
    # TODO: improve and implement more meaningful smoke tests, e.g. check for
    # consistent return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self):
f = RandomRotation3D(degrees=45.5)
repr = (
"""RandomRotation3D(degrees=tensor([[-45.5000, 45.5000],
[-45.5000, 45.5000],
[-45.5000, 45.5000]]), resample=BILINEAR, align_corners=False, p=0.5, """
"""p_batch=1.0, same_on_batch=False, return_transform=False)"""
)
assert str(f) == repr
def test_random_rotation(self, device, dtype):
# This is included in doctest
        torch.manual_seed(0)  # for random reproducibility
f = RandomRotation3D(degrees=45.0, return_transform=True)
f1 = RandomRotation3D(degrees=45.0)
input = torch.tensor(
[
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
],
device=device,
dtype=dtype,
) # 3 x 4 x 4
expected = torch.tensor(
[
[
[
[
[0.0000, 0.0000, 0.6810, 0.5250],
[0.5052, 0.0000, 0.0000, 0.0613],
[0.1159, 0.1072, 0.5324, 0.0870],
[0.0000, 0.0000, 0.1927, 0.0000],
],
[
[0.0000, 0.1683, 0.6963, 0.1131],
[0.0566, 0.0000, 0.5215, 0.2796],
[0.0694, 0.6039, 1.4519, 1.1240],
[0.0000, 0.1325, 0.1542, 0.2510],
],
[
[0.0000, 0.2054, 0.0000, 0.0000],
[0.0026, 0.6088, 0.7358, 0.2319],
[0.1261, 1.0830, 1.3687, 1.4940],
[0.0000, 0.0416, 0.2012, 0.3124],
],
]
]
],
device=device,
dtype=dtype,
)
expected_transform = torch.tensor(
[
[
[0.6523, 0.3666, -0.6635, 0.6352],
[-0.6185, 0.7634, -0.1862, 1.4689],
[0.4382, 0.5318, 0.7247, -1.1797],
[0.0000, 0.0000, 0.0000, 1.0000],
]
],
device=device,
dtype=dtype,
)
out, mat = f(input)
assert_close(out, expected, rtol=1e-6, atol=1e-4)
assert_close(mat, expected_transform, rtol=1e-6, atol=1e-4)
        torch.manual_seed(0) # for random reproducibility
assert_close(f1(input), expected, rtol=1e-6, atol=1e-4)
def test_batch_random_rotation(self, device, dtype):
        torch.manual_seed(24) # for random reproducibility
f = RandomRotation3D(degrees=45.0, return_transform=True)
input = torch.tensor(
[
[
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
]
],
device=device,
dtype=dtype,
        ) # 1 x 3 x 4 x 4
expected = torch.tensor(
[
[
[
[
[1.0000, 0.0000, 0.0000, 2.0000],
[0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 1.0000, 2.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 2.0000],
],
[
[1.0000, 0.0000, 0.0000, 2.0000],
[0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 1.0000, 2.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 2.0000],
],
[
[1.0000, 0.0000, 0.0000, 2.0000],
[0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 1.0000, 2.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 2.0000],
],
]
],
[
[
[
[0.0000, 0.0726, 0.0000, 0.0000],
[0.1038, 1.0134, 0.5566, 0.1519],
[0.0000, 1.0849, 1.1068, 0.0000],
[0.1242, 1.1065, 0.9681, 0.0000],
],
[
[0.0000, 0.0047, 0.0166, 0.0000],
[0.0579, 0.4459, 0.0000, 0.4728],
[0.1864, 1.3349, 0.7530, 0.3251],
[0.1431, 1.2481, 0.4471, 0.0000],
],
[
[0.0000, 0.4840, 0.2314, 0.0000],
[0.0000, 0.0328, 0.0000, 0.1434],
[0.1899, 0.5580, 0.0000, 0.9170],
[0.0000, 0.2042, 0.1571, 0.0855],
],
]
],
],
device=device,
dtype=dtype,
)
expected_transform = torch.tensor(
[
[
[1.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 1.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 1.0000],
],
[
[0.7522, -0.6326, -0.1841, 1.5047],
[0.6029, 0.5482, 0.5796, -0.8063],
[-0.2657, -0.5470, 0.7938, 1.4252],
[0.0000, 0.0000, 0.0000, 1.0000],
],
],
device=device,
dtype=dtype,
)
        input = input.repeat(2, 1, 1, 1, 1) # 2 x 1 x 3 x 4 x 4
out, mat = f(input)
assert_close(out, expected, rtol=1e-6, atol=1e-4)
assert_close(mat, expected_transform, rtol=1e-6, atol=1e-4)
def test_same_on_batch(self, device, dtype):
f = RandomRotation3D(degrees=40, same_on_batch=True)
input = torch.eye(6, device=device, dtype=dtype).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 3, 6, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
def test_sequential(self, device, dtype):
        torch.manual_seed(24) # for random reproducibility
f = nn.Sequential(
RandomRotation3D(torch.tensor([-45.0, 90]), return_transform=True),
RandomRotation3D(10.4, return_transform=True),
)
f1 = nn.Sequential(RandomRotation3D(torch.tensor([-45.0, 90]), return_transform=True), RandomRotation3D(10.4))
input = torch.tensor(
[
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 0.0, 1.0, 2.0]],
],
device=device,
dtype=dtype,
) # 3 x 4 x 4
expected = torch.tensor(
[
[
[
[
[0.3431, 0.1239, 0.0000, 1.0348],
[0.0000, 0.2035, 0.1139, 0.1770],
[0.0789, 0.9057, 1.7780, 0.0000],
[0.0000, 0.2286, 1.2498, 1.2643],
],
[
[0.5460, 0.2131, 0.0000, 1.1453],
[0.0000, 0.0899, 0.0000, 0.4293],
[0.0797, 1.0193, 1.6677, 0.0000],
[0.0000, 0.2458, 1.2765, 1.0920],
],
[
[0.6322, 0.2614, 0.0000, 0.9207],
[0.0000, 0.0037, 0.0000, 0.6551],
[0.0689, 0.9251, 1.3442, 0.0000],
[0.0000, 0.2449, 0.9856, 0.6862],
],
]
]
],
device=device,
dtype=dtype,
)
expected_transform = torch.tensor(
[
[
[0.9857, -0.1686, -0.0019, 0.2762],
[0.1668, 0.9739, 0.1538, -0.3650],
[-0.0241, -0.1520, 0.9881, 0.2760],
[0.0000, 0.0000, 0.0000, 1.0000],
]
],
device=device,
dtype=dtype,
)
expected_transform_2 = torch.tensor(
[
[
[0.2348, -0.1615, 0.9585, 0.4316],
[0.1719, 0.9775, 0.1226, -0.3467],
[-0.9567, 0.1360, 0.2573, 1.9738],
[0.0000, 0.0000, 0.0000, 1.0000],
]
],
device=device,
dtype=dtype,
)
out, mat = f(input)
_, mat_2 = f1(input)
assert_close(out, expected, rtol=1e-6, atol=1e-4)
assert_close(mat, expected_transform, rtol=1e-6, atol=1e-4)
assert_close(mat_2, expected_transform_2, rtol=1e-6, atol=1e-4)
def test_gradcheck(self, device):
        torch.manual_seed(0) # for random reproducibility
input = torch.rand((3, 3, 3)).to(device) # 3 x 3 x 3
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(RandomRotation3D(degrees=(15.0, 15.0), p=1.0), (input,), raise_exception=True)
class TestRandomCrop3D:
    # TODO: improve and implement more meaningful smoke tests, e.g. check for consistent
    # return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self):
f = RandomCrop3D(size=(2, 3, 4), padding=(0, 1, 2), fill=10, pad_if_needed=False, p=1.0)
repr = (
"RandomCrop3D(crop_size=(2, 3, 4), padding=(0, 1, 2), fill=10, pad_if_needed=False, "
"padding_mode=constant, resample=BILINEAR, p=1.0, p_batch=1.0, same_on_batch=False, "
"return_transform=False)"
)
assert str(f) == repr
@pytest.mark.parametrize("batch_size", [1, 2])
def test_no_padding(self, batch_size, device, dtype):
torch.manual_seed(42)
inp = torch.tensor(
[
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
]
]
]
],
device=device,
dtype=dtype,
).repeat(batch_size, 1, 5, 1, 1)
f = RandomCrop3D(size=(2, 3, 4), padding=None, align_corners=True, p=1.0)
out = f(inp)
if batch_size == 1:
expected = torch.tensor(
[[[[[11, 12, 13, 14], [16, 17, 18, 19], [21, 22, 23, 24]]]]], device=device, dtype=dtype
).repeat(batch_size, 1, 2, 1, 1)
if batch_size == 2:
expected = torch.tensor(
[
[
[
[
[6.0000, 7.0000, 8.0000, 9.0000],
[11.0000, 12.0000, 13.0000, 14.0000],
[16.0000, 17.0000, 18.0000, 19.0000],
],
[
[6.0000, 7.0000, 8.0000, 9.0000],
[11.0000, 12.0000, 13.0000, 14.0000],
[16.0000, 17.0000, 18.0000, 19.0000],
],
]
],
[
[
[
[11.0000, 12.0000, 13.0000, 14.0000],
[16.0000, 17.0000, 18.0000, 19.0000],
[21.0000, 22.0000, 23.0000, 24.0000],
],
[
[11.0000, 12.0000, 13.0000, 14.0000],
[16.0000, 17.0000, 18.0000, 19.0000],
[21.0000, 22.0000, 23.0000, 24.0000],
],
]
],
],
device=device,
dtype=dtype,
)
assert_close(out, expected, atol=1e-4, rtol=1e-4)
def test_same_on_batch(self, device, dtype):
f = RandomCrop3D(size=(2, 3, 4), padding=None, align_corners=True, p=1.0, same_on_batch=True)
input = torch.eye(6).unsqueeze(dim=0).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 3, 5, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
@pytest.mark.parametrize("padding", [1, (1, 1, 1), (1, 1, 1, 1, 1, 1)])
def test_padding_batch(self, padding, device, dtype):
torch.manual_seed(42)
batch_size = 2
inp = torch.tensor([[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]], device=device, dtype=dtype).repeat(
batch_size, 1, 3, 1, 1
)
expected = torch.tensor(
[
[
[
[[0.0, 1.0, 2.0, 10.0], [3.0, 4.0, 5.0, 10.0], [6.0, 7.0, 8.0, 10.0]],
[[0.0, 1.0, 2.0, 10.0], [3.0, 4.0, 5.0, 10.0], [6.0, 7.0, 8.0, 10.0]],
]
],
[
[
[[3.0, 4.0, 5.0, 10.0], [6.0, 7.0, 8.0, 10.0], [10, 10, 10, 10.0]],
[[3.0, 4.0, 5.0, 10.0], [6.0, 7.0, 8.0, 10.0], [10, 10, 10, 10.0]],
]
],
],
device=device,
dtype=dtype,
)
f = RandomCrop3D(size=(2, 3, 4), fill=10.0, padding=padding, align_corners=True, p=1.0)
out = f(inp)
assert_close(out, expected, atol=1e-4, rtol=1e-4)
def test_pad_if_needed(self, device, dtype):
torch.manual_seed(42)
inp = torch.tensor([[[0.0, 1.0, 2.0]]], device=device, dtype=dtype)
expected = torch.tensor(
[
[
[
[[9.0, 9.0, 9.0, 9.0], [9.0, 9.0, 9.0, 9.0], [9.0, 9.0, 9.0, 9.0]],
[[0.0, 1.0, 2.0, 9.0], [9.0, 9.0, 9.0, 9.0], [9.0, 9.0, 9.0, 9.0]],
]
]
],
device=device,
dtype=dtype,
)
rc = RandomCrop3D(size=(2, 3, 4), pad_if_needed=True, fill=9, align_corners=True, p=1.0)
out = rc(inp)
assert_close(out, expected, atol=1e-4, rtol=1e-4)
def test_gradcheck(self, device, dtype):
        torch.manual_seed(0) # for random reproducibility
        inp = torch.rand((3, 3, 3), device=device, dtype=dtype) # 3 x 3 x 3
inp = utils.tensor_to_gradcheck_var(inp) # to var
assert gradcheck(RandomCrop3D(size=(3, 3, 3), p=1.0), (inp,), raise_exception=True)
@pytest.mark.skip("Need to fix Union type")
def test_jit(self, device, dtype):
# Define script
op = RandomCrop(size=(3, 3), p=1.0).forward
op_script = torch.jit.script(op)
img = torch.ones(1, 1, 5, 6, device=device, dtype=dtype)
actual = op_script(img)
expected = kornia.center_crop3d(img)
assert_close(actual, expected)
@pytest.mark.skip("Need to fix Union type")
def test_jit_trace(self, device, dtype):
# Define script
op = RandomCrop(size=(3, 3), p=1.0).forward
op_script = torch.jit.script(op)
# 1. Trace op
img = torch.ones(1, 1, 5, 6, device=device, dtype=dtype)
op_trace = torch.jit.trace(op_script, (img,))
# 2. Generate new input
img = torch.ones(1, 1, 5, 6, device=device, dtype=dtype)
# 3. Evaluate
actual = op_trace(img)
expected = op(img)
assert_close(actual, expected)
class TestCenterCrop3D:
def test_no_transform(self, device, dtype):
inp = torch.rand(1, 2, 4, 4, 4, device=device, dtype=dtype)
out = kornia.augmentation.CenterCrop3D(2)(inp)
assert out.shape == (1, 2, 2, 2, 2)
def test_transform(self, device, dtype):
inp = torch.rand(1, 2, 5, 4, 8, device=device, dtype=dtype)
out = kornia.augmentation.CenterCrop3D(2, return_transform=True)(inp)
assert len(out) == 2
assert out[0].shape == (1, 2, 2, 2, 2)
assert out[1].shape == (1, 4, 4)
def test_no_transform_tuple(self, device, dtype):
inp = torch.rand(1, 2, 5, 4, 8, device=device, dtype=dtype)
out = kornia.augmentation.CenterCrop3D((3, 4, 5))(inp)
assert out.shape == (1, 2, 3, 4, 5)
def test_gradcheck(self, device, dtype):
input = torch.rand(1, 2, 3, 4, 5, device=device, dtype=dtype)
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(kornia.augmentation.CenterCrop3D(3), (input,), raise_exception=True)
class TestRandomEqualize3D:
    # TODO: improve and implement more meaningful smoke tests, e.g. check for consistent
    # return values such as a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under Windows OS due to printing precision.")
def test_smoke(self, device, dtype):
f = RandomEqualize3D(p=0.5)
repr = "RandomEqualize3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=False)"
assert str(f) == repr
def test_random_equalize(self, device, dtype):
f = RandomEqualize3D(p=1.0, return_transform=True)
f1 = RandomEqualize3D(p=0.0, return_transform=True)
f2 = RandomEqualize3D(p=1.0)
f3 = RandomEqualize3D(p=0.0)
bs, channels, depth, height, width = 1, 3, 6, 10, 10
inputs3d = self.build_input(channels, depth, height, width, bs, device=device, dtype=dtype)
row_expected = torch.tensor(
[0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235, 0.9412, 1.0000],
device=device,
dtype=dtype,
)
expected = self.build_input(channels, depth, height, width, bs=1, row=row_expected, device=device, dtype=dtype)
identity = kornia.eye_like(4, expected)
assert_close(f(inputs3d)[0], expected, rtol=1e-4, atol=1e-4)
assert_close(f(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
assert_close(f1(inputs3d)[0], inputs3d, rtol=1e-4, atol=1e-4)
assert_close(f1(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
assert_close(f2(inputs3d), expected, rtol=1e-4, atol=1e-4)
assert_close(f3(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
def test_batch_random_equalize(self, device, dtype):
f = RandomEqualize3D(p=1.0, return_transform=True)
f1 = RandomEqualize3D(p=0.0, return_transform=True)
f2 = RandomEqualize3D(p=1.0)
f3 = RandomEqualize3D(p=0.0)
bs, channels, depth, height, width = 2, 3, 6, 10, 10
inputs3d = self.build_input(channels, depth, height, width, bs, device=device, dtype=dtype)
row_expected = torch.tensor([0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235, 0.9412, 1.0000])
expected = self.build_input(channels, depth, height, width, bs, row=row_expected, device=device, dtype=dtype)
identity = kornia.eye_like(4, expected) # 2 x 4 x 4
assert_close(f(inputs3d)[0], expected, rtol=1e-4, atol=1e-4)
assert_close(f(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
assert_close(f1(inputs3d)[0], inputs3d, rtol=1e-4, atol=1e-4)
assert_close(f1(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
assert_close(f2(inputs3d), expected, rtol=1e-4, atol=1e-4)
assert_close(f3(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
f = RandomEqualize3D(p=0.5, same_on_batch=True)
input = torch.eye(4, device=device, dtype=dtype)
input = input.unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 2, 1, 1)
res = f(input)
assert (res[0] == res[1]).all()
def test_gradcheck(self, device, dtype):
        torch.manual_seed(0) # for random reproducibility
inputs3d = torch.rand((3, 3, 3), device=device, dtype=dtype) # 3 x 3 x 3
inputs3d = utils.tensor_to_gradcheck_var(inputs3d) # to var
assert gradcheck(RandomEqualize3D(p=0.5), (inputs3d,), raise_exception=True)
@staticmethod
def build_input(channels, depth, height, width, bs=1, row=None, device='cpu', dtype=torch.float32):
if row is None:
row = torch.arange(width, device=device, dtype=dtype) / float(width)
channel = torch.stack([row] * height)
image = torch.stack([channel] * channels)
image3d = torch.stack([image] * depth).transpose(0, 1)
batch = torch.stack([image3d] * bs)
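        # e.g. build_input(3, 6, 10, 10, bs=2) -> shape (2, 3, 6, 10, 10):
        # each row is the ramp 0/width .. (width-1)/width, tiled over height,
        # then channels, then depth (transposed to channels-first), then batch.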
return batch.to(device, dtype)
class TestRandomAffine3D:
def test_batch_random_affine_3d(self, device, dtype):
f = RandomAffine3D((0, 0, 0), p=1., return_transform=True) # No rotation
tensor = torch.tensor([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]],
device=device, dtype=dtype) # 1 x 1 x 1 x 3 x 3
expected = torch.tensor([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]],
device=device, dtype=dtype) # 1 x 1 x 1 x 3 x 3
expected_transform = torch.tensor(
[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]],
device=device, dtype=dtype) # 1 x 4 x 4
        tensor = tensor.repeat(5, 3, 1, 1, 1) # 5 x 3 x 1 x 3 x 3
        expected = expected.repeat(5, 3, 1, 1, 1) # 5 x 3 x 1 x 3 x 3
expected_transform = expected_transform.repeat(5, 1, 1) # 5 x 4 x 4
assert (f(tensor)[0] == expected).all()
assert (f(tensor)[1] == expected_transform).all()
| 39.487335
| 119
| 0.481731
| 5,740
| 38,974
| 3.208711
| 0.063589
| 0.107938
| 0.140895
| 0.16397
| 0.825605
| 0.790151
| 0.777229
| 0.755294
| 0.733033
| 0.704148
| 0
| 0.175957
| 0.356494
| 38,974
| 986
| 120
| 39.527383
| 0.558413
| 0.04775
| 0
| 0.52125
| 0
| 0.0075
| 0.024899
| 0.007176
| 0
| 0
| 0
| 0.002028
| 0.1175
| 1
| 0.05375
| false
| 0
| 0.01125
| 0
| 0.07625
| 0.0075
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1ef89f3c14f3d6166d3ba2af7220b1bdc6bf1857
| 6,824
|
py
|
Python
|
cask/spirits/migrations/0001_initial.py
|
dcramer/cask-server
|
32535229a907479c3645aa34b75755d3e2b12dda
|
[
"Apache-2.0"
] | 2
|
2018-08-07T16:18:54.000Z
|
2020-11-18T15:04:41.000Z
|
cask/spirits/migrations/0001_initial.py
|
dcramer/cask-server
|
32535229a907479c3645aa34b75755d3e2b12dda
|
[
"Apache-2.0"
] | 2
|
2020-02-11T23:07:31.000Z
|
2020-06-05T18:56:19.000Z
|
cask/spirits/migrations/0001_initial.py
|
dcramer/cask-server
|
32535229a907479c3645aa34b75755d3e2b12dda
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1 on 2018-08-09 21:59
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("world", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Bottle",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128)),
("age", models.PositiveSmallIntegerField(null=True)),
("distillation_date", models.DateTimeField(null=True)),
("bottle_date", models.DateTimeField(null=True)),
("abv", models.DecimalField(decimal_places=2, max_digits=5)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="Brand",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128, unique=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"created_by",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="CaskType",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128, unique=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"created_by",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Distillery",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128, unique=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"country",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="world.Country"
),
),
(
"created_by",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"region",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="world.Region"
),
),
],
),
migrations.CreateModel(
name="FlavorProfile",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128, unique=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"created_by",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="SpiritType",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=128, unique=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"created_by",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AddField(
model_name="bottle",
name="brand",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="spirits.Brand"
),
),
migrations.AddField(
model_name="bottle",
name="created_by",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="bottle",
name="distillery",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="spirits.Distillery"
),
),
migrations.AddField(
model_name="bottle",
name="spirit_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="spirits.SpiritType"
),
),
]
| 33.615764
| 87
| 0.408558
| 483
| 6,824
| 5.610766
| 0.175983
| 0.038376
| 0.061993
| 0.097417
| 0.783395
| 0.760517
| 0.73321
| 0.73321
| 0.717712
| 0.717712
| 0
| 0.012809
| 0.49663
| 6,824
| 202
| 88
| 33.782178
| 0.776128
| 0.006301
| 0
| 0.757732
| 1
| 0
| 0.058416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020619
| 0
| 0.041237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
487b215e2dbeee3d53464f2d31cf16ad85ddb13d
| 5,706
|
py
|
Python
|
model/super_resolution_model/DocumentSRModel/networks/hed.py
|
JinGyeSetBirdsFree/FudanOCR
|
e6b18b0eefaf832b2eb7198f5df79e00bd4cee36
|
[
"MIT"
] | 25
|
2020-02-29T12:14:10.000Z
|
2020-04-24T07:56:06.000Z
|
model/super_resolution_model/DocumentSRModel/networks/hed.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | 33
|
2020-12-10T19:15:39.000Z
|
2022-03-12T00:17:30.000Z
|
model/super_resolution_model/DocumentSRModel/networks/hed.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | 4
|
2020-02-29T12:14:18.000Z
|
2020-04-12T12:26:50.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from networks.baseblocks import *
class HED(nn.Module):
def __init__(self, in_dims=3, out_dims=1):
super(HED, self).__init__()
self.conv1 = nn.Sequential(
ConvBlock(in_dims, 64, 3, norm=None, activation='relu'),
ConvBlock(64, 64, 3, norm=None, activation='relu')
)
self.conv2 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(64, 128, 3, norm=None, activation='relu'),
ConvBlock(128, 128, 3, norm=None, activation='relu')
)
self.conv3 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(128, 256, 3, norm=None, activation='relu'),
ConvBlock(256, 256, 3, norm=None, activation='relu'),
ConvBlock(256, 256, 3, norm=None, activation='relu')
)
self.conv4 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(256, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu')
)
self.conv5 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu')
)
self.dsn1 = nn.Conv2d(64, out_dims, 1)
self.dsn2 = nn.Conv2d(128, out_dims, 1)
self.dsn3 = nn.Conv2d(256, out_dims, 1)
self.dsn4 = nn.Conv2d(512, out_dims, 1)
self.dsn5 = nn.Conv2d(512, out_dims, 1)
self.fuse = nn.Conv2d(5, out_dims, 1)
self.threshold = nn.Threshold(0.24, 0)
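        # This Threshold module is constructed but never applied in forward.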
def forward(self, x):
h = x.size(2)
w = x.size(3)
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(x2)
x4 = self.conv4(x3)
x5 = self.conv5(x4)
d1 = self.dsn1(x1)
d2 = F.interpolate(self.dsn2(x2), size=(h, w))
d3 = F.interpolate(self.dsn3(x3), size=(h, w))
d4 = F.interpolate(self.dsn4(x4), size=(h, w))
d5 = F.interpolate(self.dsn5(x5), size=(h, w))
fuse = self.fuse(torch.cat((d1, d2, d3, d4, d5), 1))
d1 = torch.sigmoid(d1)
d2 = torch.sigmoid(d2)
d3 = torch.sigmoid(d3)
d4 = torch.sigmoid(d4)
d5 = torch.sigmoid(d5)
fuse = torch.sigmoid(fuse)
return d1, d2, d3, d4, d5, fuse
class HED_NUP(nn.Module):
def __init__(self, in_dims=3, out_dims=1):
        super(HED_NUP, self).__init__()
self.conv1 = nn.Sequential(
ConvBlock(in_dims, 64, 3, norm=None, activation='relu'),
ConvBlock(64, 64, 3, norm=None, activation='relu')
)
self.conv2 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(64, 128, 3, norm=None, activation='relu'),
ConvBlock(128, 128, 3, norm=None, activation='relu')
)
self.conv3 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(128, 256, 3, norm=None, activation='relu'),
ConvBlock(256, 256, 3, norm=None, activation='relu'),
ConvBlock(256, 256, 3, norm=None, activation='relu')
)
self.conv4 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(256, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu')
)
self.conv5 = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu'),
ConvBlock(512, 512, 3, norm=None, activation='relu')
)
self.dsn1 = nn.Conv2d(64, out_dims, 1)
self.dsn2 = nn.Conv2d(128, out_dims, 1)
self.dsn3 = nn.Conv2d(256, out_dims, 1)
self.dsn4 = nn.Conv2d(512, out_dims, 1)
self.dsn5 = nn.Conv2d(512, out_dims, 1)
self.fuse = nn.Conv2d(5, out_dims, 1)
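    # Note: unlike HED.forward, the side outputs here are never upsampled to a
    # common spatial size, so the torch.cat in forward will fail whenever the
    # dsn maps d1..d5 differ in height/width (as they do for typical inputs).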
def forward(self, x):
h = x.size(2)
w = x.size(3)
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(x2)
x4 = self.conv4(x3)
x5 = self.conv5(x4)
d1 = self.dsn1(x1)
d2 = self.dsn2(x2)
d3 = self.dsn3(x3)
d4 = self.dsn4(x4)
d5 = self.dsn5(x5)
fuse = self.fuse(torch.cat((d1, d2, d3, d4, d5), 1))
d1 = torch.sigmoid(d1)
d2 = torch.sigmoid(d2)
d3 = torch.sigmoid(d3)
d4 = torch.sigmoid(d4)
d5 = torch.sigmoid(d5)
fuse = torch.sigmoid(fuse)
return d1, d2, d3, d4, d5, fuse
class HED_1L(nn.Module):
def __init__(self, in_dims=3, out_dims=1):
super(HED_1L, self).__init__()
self.conv1 = nn.Sequential(
ConvBlock(in_dims, 64, 3, norm=None, activation='relu'),
ConvBlock(64, 64, 3, norm=None, activation='relu')
)
self.dsn1 = nn.Conv2d(64, out_dims, 1)
def forward(self, x):
x1 = self.conv1(x)
d1 = self.dsn1(x1)
d1 = torch.sigmoid(d1)
return d1
def hed_weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
init.constant_(m.bias.data, 0.1)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
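# Hypothetical usage sketch: the usual pattern would be to build the network
# and apply the custom initializer via nn.Module.apply, e.g.
#
#     net = HED(in_dims=3, out_dims=1)
#     net.apply(hed_weight_init)
#     d1, d2, d3, d4, d5, fuse = net(torch.rand(1, 3, 256, 256))
#
# All five side outputs and the fused map match the input's spatial size.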
| 36.113924
| 68
| 0.55538
| 792
| 5,706
| 3.920455
| 0.112374
| 0.045089
| 0.081159
| 0.171337
| 0.831562
| 0.831562
| 0.831562
| 0.826731
| 0.826731
| 0.826731
| 0
| 0.103062
| 0.296004
| 5,706
| 158
| 69
| 36.113924
| 0.669903
| 0
| 0
| 0.730496
| 0
| 0
| 0.019625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049645
| false
| 0
| 0.042553
| 0
| 0.134752
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6fc2e9ed8df1f19bf987364c77e522f49a5a3833
| 3,456
|
py
|
Python
|
test/test_pii_analyzer.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
test/test_pii_analyzer.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
test/test_pii_analyzer.py
|
shahrukhx01/obsei
|
ca1f8ecde28ac6003c6112cffbb690a235e86f0b
|
[
"Apache-2.0"
] | null | null | null |
from obsei.analyzer.base_analyzer import AnalyzerRequest
from obsei.analyzer.pii_analyzer import PresidioPIIAnalyzerConfig
text_to_anonymize = "His name is Mr. Jones. His phone number is 212-555-5555 and email is jones@email.com"
PII_LIST = ["Jones", "212-555-5555", "jones@email.com"]
TEXTS = [text_to_anonymize]
def test_pii_analyzer_replace_original(pii_analyzer):
analyzer_config = PresidioPIIAnalyzerConfig(
analyze_only=False, return_decision_process=True, replace_original_text=True
)
source_responses = [
AnalyzerRequest(processed_text=text, source_name="sample") for text in TEXTS
]
analyzer_responses = pii_analyzer.analyze_input(
source_response_list=source_responses, analyzer_config=analyzer_config
)
assert len(analyzer_responses) == len(TEXTS)
for text, analyzer_response in zip(TEXTS, analyzer_responses):
assert analyzer_response.segmented_data is not None
assert analyzer_response.segmented_data["analyzer_result"] is not None
assert analyzer_response.segmented_data["anonymized_result"] is not None
assert analyzer_response.segmented_data["anonymized_text"] is not None
for pii_info in PII_LIST:
assert pii_info not in analyzer_response.segmented_data["anonymized_text"]
assert (
analyzer_response.segmented_data["anonymized_text"]
== analyzer_response.processed_text
)
assert analyzer_response.segmented_data["anonymized_text"] != text
def test_pii_analyzer_not_replace_original(pii_analyzer):
analyzer_config = PresidioPIIAnalyzerConfig(
analyze_only=False, return_decision_process=True, replace_original_text=False
)
source_responses = [
AnalyzerRequest(processed_text=text, source_name="sample") for text in TEXTS
]
analyzer_responses = pii_analyzer.analyze_input(
source_response_list=source_responses, analyzer_config=analyzer_config
)
assert len(analyzer_responses) == len(TEXTS)
for text, analyzer_response in zip(TEXTS, analyzer_responses):
assert analyzer_response.segmented_data is not None
assert analyzer_response.segmented_data["analyzer_result"] is not None
assert analyzer_response.segmented_data["anonymized_result"] is not None
assert analyzer_response.segmented_data["anonymized_text"] is not None
for pii_info in PII_LIST:
assert pii_info not in analyzer_response.segmented_data["anonymized_text"]
assert analyzer_response.processed_text == text
assert analyzer_response.segmented_data["anonymized_text"] != text
def test_pii_analyzer_analyze_only(pii_analyzer):
analyzer_config = PresidioPIIAnalyzerConfig(
analyze_only=True, return_decision_process=True
)
source_responses = [
AnalyzerRequest(processed_text=text, source_name="sample") for text in TEXTS
]
analyzer_responses = pii_analyzer.analyze_input(
source_response_list=source_responses, analyzer_config=analyzer_config
)
assert len(analyzer_responses) == len(TEXTS)
for text, analyzer_response in zip(TEXTS, analyzer_responses):
assert analyzer_response.segmented_data is not None
assert analyzer_response.segmented_data["analyzer_result"] is not None
assert analyzer_response.segmented_data["anonymized_result"] is None
assert text == analyzer_response.processed_text
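# A minimal sketch of the pii_analyzer fixture these tests rely on (normally
# supplied by conftest.py; it is assumed here that PresidioPIIAnalyzer in
# obsei.analyzer.pii_analyzer can be constructed with defaults):
#
#     import pytest
#     from obsei.analyzer.pii_analyzer import PresidioPIIAnalyzer
#
#     @pytest.fixture(scope="module")
#     def pii_analyzer():
#         return PresidioPIIAnalyzer()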
| 40.186047
| 106
| 0.754051
| 416
| 3,456
| 5.932692
| 0.134615
| 0.142626
| 0.162075
| 0.188006
| 0.866694
| 0.843193
| 0.843193
| 0.818477
| 0.807536
| 0.807536
| 0
| 0.00705
| 0.179109
| 3,456
| 85
| 107
| 40.658824
| 0.862883
| 0
| 0
| 0.578125
| 0
| 0.015625
| 0.096933
| 0
| 0
| 0
| 0
| 0
| 0.328125
| 1
| 0.046875
| false
| 0
| 0.03125
| 0
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fc348edc329c1378ba5d7e8ce766a8b19475823
| 149
|
py
|
Python
|
curris/test/test_header.py
|
a1trl9/curris
|
f9b55a7a2a8864882f297a4a324ccff182176417
|
[
"MIT"
] | null | null | null |
curris/test/test_header.py
|
a1trl9/curris
|
f9b55a7a2a8864882f297a4a324ccff182176417
|
[
"MIT"
] | null | null | null |
curris/test/test_header.py
|
a1trl9/curris
|
f9b55a7a2a8864882f297a4a324ccff182176417
|
[
"MIT"
] | null | null | null |
from curris.test.base import compare_json
def test_header():
compare_json('curris/test/resource/header.md', 'curris/test/resource/header.json')
| 29.8
| 86
| 0.778523
| 22
| 149
| 5.136364
| 0.5
| 0.265487
| 0.318584
| 0.424779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087248
| 149
| 4
| 87
| 37.25
| 0.830882
| 0
| 0
| 0
| 0
| 0
| 0.416107
| 0.416107
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
6ff42509e475db4afb1775dffa4702b2923b092b
| 3,418
|
py
|
Python
|
evalharr1.py
|
revorg7/viola-jones
|
9f757ac40a6e9eb78f4e60e983e9ac996b7ea5af
|
[
"MIT"
] | 1
|
2021-12-11T12:18:02.000Z
|
2021-12-11T12:18:02.000Z
|
evalharr1.py
|
revorg7/viola-jones
|
9f757ac40a6e9eb78f4e60e983e9ac996b7ea5af
|
[
"MIT"
] | 1
|
2018-03-01T06:13:52.000Z
|
2018-04-10T21:00:48.000Z
|
evalharr1.py
|
revorg7/Viola-Jones
|
9f757ac40a6e9eb78f4e60e983e9ac996b7ea5af
|
[
"MIT"
] | null | null | null |
# Built on this code:
#http://stackoverflow.com/questions/19297790/fastest-python-way-to-evaluate-haar-feature-values
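# The helpers below appear to assume i is an integral (summed-area) image,
# so each rectangle sum reduces to at most four corner lookups via i.item.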
def Zero(x,y,i,h,w):
#bottom rect - top rect
if x==0 and y==0:
return i.item(x+2*h-1,y+w-1) -2*(i.item(x+h-1,y+w-1))
elif y==0:
return (i.item(x+2*h-1,y+w-1))-2*(i.item(x+h-1,y+w-1))+(i.item(x-1,y+w-1))
elif x==0:
return i.item(x+2*h-1,y+w-1)+2*(i.item(x+h-1,y-1))-2*(i.item(x+h-1,y+w-1))-i.item(x+2*h-1,y-1)
else:
bright = (i.item(x+h-1,y+w-1)+i.item(x-1,y-1))-(i.item(x-1,y+w-1)+i.item(x+h-1,y-1))
dark = (i.item(x+2*h-1,y+w-1)+i.item(x+h-1,y-1))-(i.item(x+h-1,y+w-1)+i.item(x+2*h-1,y-1))
return dark - bright
def One(x,y,i,h,w):
#left rect - right rect
if x==0 and y==0:
return 2*(i.item(x+h-1,y+w-1))-(i.item(x+h-1,y+2*w-1))
elif y==0:
return 2*(i.item(x+h-1,y+w-1))-2*(i.item(x-1,y+w-1))+(i.item(x-1,y+2*w-1))-(i.item(x+h-1,y+2*w-1))
elif x==0:
return 2*(i.item(x+h-1,y+w-1))-(i.item(x+h-1,y+2*w-1)+i.item(x+h-1,y-1))
else:
bright = (i.item(x+h-1,y+2*w-1)+i.item(x-1,y+w-1))-(i.item(x-1,y+2*w-1)+i.item(x+h-1,y+w-1))
dark = (i.item(x+h-1,y+w-1)+i.item(x-1,y-1))-(i.item(x-1,y+w-1)+i.item(x+h-1,y-1))
return dark-bright
def Two(x,y,i,h,w):
    # the bright block is centered; there is just one
if x!=0 and y!=0:
dark = i.item(x+3*h-1,y+w-1)+i.item(x-1,y-1)-i.item(x+3*h-1,y-1)-i.item(x-1,y+w-1)
bright = i.item(x+2*h-1,y+w-1)+i.item(x+h-1,y-1)-i.item(x+h-1,y+w-1)-i.item(x+2*h-1,y-1)
return dark - 2*bright
    # simplification of the above formula: all terms with x-1 < 0 or y-1 < 0 are treated as zero
elif x==0 and y==0:
dark = i.item(x+3*h-1,y+w-1)
bright = i.item(x+2*h-1,y+w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
elif x==0:
dark = i.item(x+3*h-1,y+w-1)-i.item(x+3*h-1,y-1)
bright = i.item(x+2*h-1,y+w-1)+i.item(x+h-1,y-1)-i.item(x+h-1,y+w-1)-i.item(x+2*h-1,y-1)
return dark - 2*bright
else:
dark = i.item(x+3*h-1,y+w-1)-i.item(x-1,y+w-1)
bright = i.item(x+2*h-1,y+w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
def Three(x,y,i,h,w):
    # the bright block is centered; there is just one
if x!=0 and y!=0:
dark = i.item(x+h-1,y+3*w-1)+i.item(x-1,y-1)-i.item(x-1,y+3*w-1)-i.item(x+h-1,y-1)
bright = i.item(x+h-1,y+2*w-1)+i.item(x-1,y+w-1)-i.item(x-1,y+2*w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
elif x==0 and y==0:
dark = i.item(x+h-1,y+3*w-1)
bright = i.item(x+h-1,y+2*w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
elif x==0:
dark = i.item(x+h-1,y+3*w-1)-i.item(x+h-1,y-1)
bright = i.item(x+h-1,y+2*w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
else:
dark = i.item(x+h-1,y+3*w-1)-i.item(x-1,y+3*w-1)
bright = i.item(x+h-1,y+2*w-1)+i.item(x-1,y+w-1)-i.item(x-1,y+2*w-1)-i.item(x+h-1,y+w-1)
return dark - 2*bright
def Four(x,y,i,h,w):
    # unlike the others, this takes the full feature height/width, i.e. h, w = 2*k
    # type-D feature; the top-left corner is assumed dark
    return One(x, y, i, h // 2, w // 2) - One(x + h // 2, y, i, h // 2, w // 2)
def EvaluateHaar(Lis, feature):
    x, y, h, w, f, p = feature
options = {0 : Zero,
1 : One,
2 : Two,
3 : Three,
4 : Four,
}
R = []
append1 = R.append
for i in Lis:
append1(options[f](x,y,i,h,w))
return R
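# Hypothetical usage sketch: evaluate a type-0 (two-block vertical) feature
# at the origin of a numpy summed-area table.
#
#     import numpy as np
#     img = np.random.rand(24, 24)
#     ii = img.cumsum(axis=0).cumsum(axis=1)  # integral image
#     vals = EvaluateHaar([ii], (0, 0, 4, 8, 0, 1))  # f=0 selects Zero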
| 31.357798
| 112
| 0.513751
| 901
| 3,418
| 1.948946
| 0.09323
| 0.216401
| 0.259681
| 0.179385
| 0.754556
| 0.731207
| 0.718679
| 0.716401
| 0.687927
| 0.683941
| 0
| 0.092599
| 0.213283
| 3,418
| 108
| 113
| 31.648148
| 0.560431
| 0.112932
| 0
| 0.485714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d20c54e611d8f7927f588b2f3dfef2fc1fa42df2
| 320
|
py
|
Python
|
temboo/core/Library/Salesforce/OAuth/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Salesforce/OAuth/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Salesforce/OAuth/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Salesforce.OAuth.FinalizeOAuth import FinalizeOAuth, FinalizeOAuthInputSet, FinalizeOAuthResultSet, FinalizeOAuthChoreographyExecution
from temboo.Library.Salesforce.OAuth.InitializeOAuth import InitializeOAuth, InitializeOAuthInputSet, InitializeOAuthResultSet, InitializeOAuthChoreographyExecution
| 106.666667
| 164
| 0.9125
| 22
| 320
| 13.272727
| 0.636364
| 0.068493
| 0.116438
| 0.184932
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04375
| 320
| 2
| 165
| 160
| 0.954248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d23e9bbc3b5322d329707c26017327ea14e0a846
| 21,219
|
py
|
Python
|
SalvageScripts/test/test_helpers.py
|
iannn/SalvageCalculator1
|
35a11578436ac65341e923a4b38d57ccd6256aa6
|
[
"MIT"
] | null | null | null |
SalvageScripts/test/test_helpers.py
|
iannn/SalvageCalculator1
|
35a11578436ac65341e923a4b38d57ccd6256aa6
|
[
"MIT"
] | null | null | null |
SalvageScripts/test/test_helpers.py
|
iannn/SalvageCalculator1
|
35a11578436ac65341e923a4b38d57ccd6256aa6
|
[
"MIT"
] | null | null | null |
import pytest
""" last result sys hack
import sys
sys.path.append("../SalvageScripts")
"""
#I see a lot of fighting on what to do with proper importing and packaging of projects
import calc_helpers
@pytest.fixture
def refined_scalar():
#Taken directly from calc_salvage. This DOES have Lucent Crystal though so maybe I should merge these into calc_helpers for completeness and consistency
refined_scalar = {'Stretched Rawhide Leather Square':2,'Cured Thin Leather Square':2,'Cured Coarse Leather Square':2,'Cured Rugged Leather Square':2,'Cured Thick Leather Square':4,'Cured Hardened Leather Square':3,
'Copper Ingot':2,'Bronze Ingot':2,'Silver Ingot':2,'Iron Ingot':3,'Steel Ingot':3,'Gold Ingot':2,'Platinum Ingot':2,'Darksteel Ingot':2,'Mithril Ingot':2,'Orichalcum Ingot':2,
'Bolt of Jute':2,'Bolt of Wool':2,'Bolt of Cotton':2,'Bolt of Linen':2,'Bolt of Silk':3,'Bolt of Gossamer':2,
'Green Wood Plank':3,'Soft Wood Plank':2,'Seasoned Wood Plank':3,'Hard Wood Plank':3,'Elder Wood Plank':3,'Ancient Wood Plank':3,
'Pile of Lucent Crystal':10}
return refined_scalar
@pytest.fixture
def refined_lookup():
#taken directly from calc_salvage. No lucent mote though
unrefined_to_refined = {'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'}
return unrefined_to_refined
"""
The purpose of testing generate_multiplier is to make sure that:
the proper ingots are being selected with the metals
regular unrefined and refined materials are being evaluated correctly
materials can correctly be "passed" on refining by not having their refined_lookup entries altered
"""
"""
generate_multiplier needs
5 input arguments to run (unrefined_dct,refined_dct,refined_scalar,refined_lookup,buysell)
2 output arguments to check (expected_multiplier_dct,expected_decision_dct)
1 addition check of refined_lookup for changes
2 of these are constants so they are prime candidates for being fixtures:
refined_scalar
refined_lookup
each individual case needs unique value combinations for (in this order):
unrefined_dct = dictionary with unrefined material and price in the format of "name":[buy order price, sell listing price]
refined_dct = dictionary with refined material and price in the format of "name":[buy order price, sell listing price]
buysell = 0 or 1 representing the buy or sell price position
expected_multiplier_dct = dictionary with expected value of material price with best value in the format of "name": price
expected_decision_dct = dictionary with expected form of material in the format of "name":"format name"
additional refined_lookup for change/no change
"""
#Special Ore cases
CopperOreWins = ({'Copper Ore': [0, 5]},
{'Copper Ingot': [0, 10],'Bronze Ingot': [0, 11.6]},
1,
{'Copper Ore': 5},
{'Copper Ore': 'raw'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
CopperIngotWins = ({'Copper Ore': [0, 1]},
{'Copper Ingot': [0, 12],'Bronze Ingot': [0, 2]},
1,
{'Copper Ore': 6},
{'Copper Ore': 'C Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
BronzeIngotWins = ({'Copper Ore': [0, 2]},
{'Copper Ingot': [0, 3],'Bronze Ingot': [0, 17.6]},
1,
{'Copper Ore': 8},
{'Copper Ore': 'B Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Bronze Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
IronOreWins = ({'Iron Ore': [0, 10]},
{'Iron Ingot': [0, 30], 'Steel Ingot': [0, 46]},
1,
{'Iron Ore': 10},
{'Iron Ore': 'raw'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
IronIngotWins = ({'Iron Ore': [0, 6]},
{'Iron Ingot': [0, 173], 'Steel Ingot': [0, 50]},
1,
{'Iron Ore': 57.6667},
{'Iron Ore': 'I Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
SteelIngotWins = ({'Iron Ore': [0, 99]},
{'Iron Ingot': [0, 100], 'Steel Ingot': [0, 316]},
1,
{'Iron Ore': 100},
{'Iron Ore': 'S Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Steel Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
PlatinumOreWins = ({'Platinum Ore': [0, 50]},
{'Platinum Ingot': [0, 100], 'Darksteel Ingot': [0, 148]},
1,
{'Platinum Ore': 50},
{'Platinum Ore': 'raw'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
PlatinumIngotWins = ({'Platinum Ore': [0, 50]},
{'Platinum Ingot': [0, 200], 'Darksteel Ingot': [0, 200]},
1,
{'Platinum Ore': 100},
{'Platinum Ore': 'P Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
DarksteelIngotWins = ({'Platinum Ore': [0, 10]},
{'Platinum Ingot': [0, 1], 'Darksteel Ingot': [0, 448]},
1,
{'Platinum Ore': 200},
{'Platinum Ore': 'D Ingot'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Darksteel Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
AncientWoodLogWins = ({'Ancient Wood Log': [0, 10]},
{'Ancient Wood Plank': [0, 30]},
1,
{'Ancient Wood Log': 10},
{'Ancient Wood Log': 'raw'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
AncientWoodPlankWins = ({'Ancient Wood Log': [0, 10]},
{'Ancient Wood Plank': [0, 150]},
1,
{'Ancient Wood Log': 50},
{'Ancient Wood Log': 'refined'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
SymbolOfControlWins = ({'Symbol Of Control': [0, 123]},
{},
1,
{'Symbol Of Control': 123},
{'Symbol Of Control': 'none'},
{'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'})
@pytest.mark.parametrize("test_unrefined_dct,test_refined_dct,test_buysell,expected_multiplier_dct,expected_decision_dct,expected_refined_lookup",[CopperOreWins,CopperIngotWins,BronzeIngotWins,IronOreWins,IronIngotWins,SteelIngotWins,PlatinumOreWins,PlatinumIngotWins,DarksteelIngotWins,AncientWoodLogWins,AncientWoodPlankWins,SymbolOfControlWins])
def test_generatingMultiplier(test_unrefined_dct,test_refined_dct,test_buysell,expected_multiplier_dct,expected_decision_dct,expected_refined_lookup,refined_scalar,refined_lookup):
multiplier_dct, decision_dct = calc_helpers.generate_multiplier(test_unrefined_dct,test_refined_dct,refined_scalar,refined_lookup,test_buysell)
assert multiplier_dct == expected_multiplier_dct
assert decision_dct == expected_decision_dct
assert refined_lookup == expected_refined_lookup
"""
The purpose of testing is to check that the math of dictionary multiplication and summation is working correctly
Test cases:
TP cut is True
TP cut is False
"""
#The math on this is funky because decimal place rounding needs to be tested here too
TPcutFalse = ({"a":2.22222,"b":1.12345},
{"a":4,"b":3.33333},
False,
{"a":8.8889,"b":3.7448},
12.6337)
TPcutTrue = ({"a":2,"b":3},
{"a":1,"b":9},
True,
{"a":1.7,"b":22.95},
24.65)
#Only basic calculation check is required
#Currently only uses basic math cases to check
@pytest.mark.parametrize("test_droprate,test_multiplier,test_TPcut,expected_salvageValues,expected_salvageSum",[TPcutFalse,TPcutTrue])
def test_sell(test_droprate,test_multiplier,test_TPcut,expected_salvageValues,expected_salvageSum):
salvageValue_dct,sum_val = calc_helpers.compute_result(test_droprate,test_multiplier,test_TPcut)
assert salvageValue_dct == expected_salvageValues
assert sum_val == expected_salvageSum
| 100.563981
| 362
| 0.64183
| 2,654
| 21,219
| 5.094197
| 0.097965
| 0.057249
| 0.063462
| 0.03003
| 0.743787
| 0.73713
| 0.724778
| 0.720636
| 0.720636
| 0.715607
| 0
| 0.0138
| 0.224799
| 21,219
| 210
| 363
| 101.042857
| 0.808134
| 0.02248
| 0
| 0.417808
| 0
| 0
| 0.611482
| 0.010491
| 0
| 0
| 0
| 0
| 0.034247
| 1
| 0.027397
| false
| 0
| 0.013699
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
966629f178a92965713002ce2c0425ecf8583a45
| 50
|
py
|
Python
|
tests/test_func.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | 10
|
2021-07-06T17:26:17.000Z
|
2022-01-11T13:02:20.000Z
|
tests/test_func.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | 105
|
2020-10-02T13:01:17.000Z
|
2021-07-27T19:06:38.000Z
|
tests/test_func.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | null | null | null |
def foo():
print('Function import successful')
| 25
| 39
| 0.7
| 6
| 50
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 39
| 25
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.509804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
968f6c541c8384c905f833ec804ec3de85f349f1
| 16,798
|
py
|
Python
|
events/tests.py
|
lordoftheflies/Django-CRM
|
6e6b4b95811df00efdcdbc6040105d6b38b740b9
|
[
"MIT"
] | 1
|
2020-03-06T09:51:29.000Z
|
2020-03-06T09:51:29.000Z
|
events/tests.py
|
lordoftheflies/Krynegger-CRM
|
6e6b4b95811df00efdcdbc6040105d6b38b740b9
|
[
"MIT"
] | null | null | null |
events/tests.py
|
lordoftheflies/Krynegger-CRM
|
6e6b4b95811df00efdcdbc6040105d6b38b740b9
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
from common.models import Address, Attachments, Comment, User
from contacts.models import Contact
from events.models import Event
from teams.models import Teams
class EventObjectTest(object):
def setUp(self):
self.user = User.objects.create(
first_name="johnEvent", username='johnDoeEvent', email='johnDoeEvent@example.com', role='ADMIN')
self.user.set_password('password')
self.user.save()
self.user1 = User.objects.create(
first_name="janeEvent",
username='janeDoeEvent',
email='janeDoeEvent@example.com',
role="USER",
has_sales_access=True)
self.user1.set_password('password')
self.user1.save()
self.user2 = User.objects.create(
first_name="joeEvent",
username='joeEvent',
email='joeEvent@example.com',
role="USER",
has_sales_access=True)
self.user2.set_password('password')
self.user2.save()
self.team_dev = Teams.objects.create(name='events teams')
self.team_dev.users.add(self.user2.id)
self.contact = Contact.objects.create(
first_name="contact event",
email="contact@event.com",
phone="12345",
description="contact",
created_by=self.user1)
self.contact.assigned_to.add(self.user1)
self.event = Event.objects.create(
name='event object test',
event_type='Non-Recurring',
start_date=(datetime.now()).strftime('%Y-%m-%d'),
start_time=(datetime.now()).strftime('%H:%M:%S'),
end_date=(datetime.now()).strftime('%Y-%m-%d'),
end_time=(datetime.now() + timedelta(hours=2)
).strftime('%H:%M:%S'),
description='non recurring event',
created_by=self.user,
date_of_meeting=(datetime.now()).strftime('%Y-%m-%d'),
)
self.event.contacts.add(self.contact)
self.event.assigned_to.add(self.user.id, self.user1.id)
self.event_1 = Event.objects.create(
name='event object test check',
event_type='Non-Recurring',
start_date=(datetime.now()).strftime('%Y-%m-%d'),
start_time=(datetime.now()).strftime('%H:%M:%S'),
end_date=(datetime.now()).strftime('%Y-%m-%d'),
end_time=(datetime.now() + timedelta(hours=2)
).strftime('%H:%M:%S'),
description='non recurring event',
created_by=self.user1,
)
self.event_1.contacts.add(self.contact)
self.event_1.assigned_to.add(self.user2.id)
self.comment = Comment.objects.create(
comment='test comment', event=self.event,
commented_by=self.user
)
self.attachment = Attachments.objects.create(
attachment='image.png', event=self.event,
created_by=self.user
)
class EventListTestCase(EventObjectTest, TestCase):
@pytest.mark.skip(reason="no way of currently testing this")
def test_events_list(self):
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.get(reverse('events:events_list'))
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.get(reverse('events:events_list'))
self.assertEqual(response.status_code, 200)
data = {
'event_name': 'event name',
'created_by': self.user.id,
'assigned_to': self.user1.id,
'date_of_meeting': (datetime.now()).strftime('%Y-%m-%d'),
}
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(reverse('events:events_list'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(reverse('events:events_list'), data)
self.assertEqual(response.status_code, 200)
class EventCreateTestCase(EventObjectTest, TestCase):
@pytest.mark.skip(reason="no way of currently testing this")
def test_events_create(self):
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.get(reverse('events:event_create'))
self.assertEqual(response.status_code, 200)
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.get(reverse('events:event_create'))
self.assertEqual(response.status_code, 200)
data = {
'event_name': 'event name',
'event_type': 'Non-Recurring',
'contacts': self.contact.id,
'teams': self.team_dev.id,
'assigned_to': self.user1.id,
'start_date': (datetime.now()).strftime('%Y-%m-%d'),
'start_time': (datetime.now()).strftime('%H:%M:%S'),
'end_date': (datetime.now()).strftime('%Y-%m-%d'),
'end_time': (datetime.now() + timedelta(hours=2)).strftime('%H:%M:%S'),
}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'name': 'event name'}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'event_type': 'Recurring', 'recurring_days': [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'name': 'recurring event test', 'end_date': (datetime.now() + timedelta(days=7)).strftime('%Y-%m-%d'),
'start_date': (datetime.now()).strftime('%Y-%m-%d'), }
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'event_type': 'Recurring', 'recurring_days': []}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'name': 'recurring event test', 'start_date': (datetime.now() + timedelta(days=7)).strftime('%Y-%m-%d'),
'end_date': (datetime.now()).strftime('%Y-%m-%d'), }
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'event_type': 'Recurring', 'start_date': ''}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'event_type': 'Recurring', 'start_time': ''}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'event_type': 'Recurring',
'start_time': (datetime.now() + timedelta(hours=2)).strftime('%H:%M:%S'),
'end_time': (datetime.now()).strftime('%H:%M:%S'),
}
response = self.client.post(reverse('events:event_create'), data)
self.assertEqual(response.status_code, 200)
class EventDetailTestCase(EventObjectTest, TestCase):
@pytest.mark.skip(reason="no way of currently testing this")
def test_events_detail(self):
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:detail_view', args=(self.event.id,)))
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:detail_view', args=(self.event.id,)))
self.assertEqual(response.status_code, 200)
response = self.client.get(
reverse('events:detail_view', args=(self.event_1.id,)))
self.assertEqual(response.status_code, 200)
self.client.login(email='joeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:detail_view', args=(self.event.id,)))
self.assertEqual(response.status_code, 403)
class EventEditTestCase(EventObjectTest, TestCase):
@pytest.mark.skip(reason="no way of currently testing this")
def test_events_edit(self):
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:event_update', args=(self.event.id,)))
self.assertEqual(response.status_code, 200)
self.client.login(email='joeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:event_update', args=(self.event.id,)))
self.assertEqual(response.status_code, 403)
data = {
'event_name': 'event name',
'event_type': 'Non-Recurring',
'contacts': self.contact.id,
'teams': self.team_dev.id,
'assigned_to': self.user1.id,
'start_date': (datetime.now()).strftime('%Y-%m-%d'),
'start_time': (datetime.now()).strftime('%H:%M:%S'),
'end_date': (datetime.now()).strftime('%Y-%m-%d'),
'end_time': (datetime.now() + timedelta(hours=2)).strftime('%H:%M:%S'),
}
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:event_update', args=(self.event.id,)), data)
self.assertEqual(response.status_code, 200)
data = {**data, 'name': 'event object test edit'}
response = self.client.post(
reverse('events:event_update', args=(self.event.id,)), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='joeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:event_update', args=(self.event.id,)), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='johnDoeEvent@example.com',
password='password')
new_data = {
'name': 'event name edit object',
'event_type': 'Recurring',
'contacts': self.contact.id,
'teams': self.team_dev.id,
'assigned_to': self.user1.id, 'end_date': (datetime.now() + timedelta(days=7)).strftime('%Y-%m-%d'),
'end_time': (datetime.now() + timedelta(hours=2)).strftime('%H:%M:%S'),
'start_time': (datetime.now()).strftime('%H:%M:%S'),
'start_date': (datetime.now()).strftime('%Y-%m-%d'),
'recurring_days': [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
}
response = self.client.post(
reverse('events:event_update', args=(self.event.id,)), new_data)
self.assertEqual(response.status_code, 200)
class AddCommentTestCase(EventObjectTest, TestCase):
def test_event_add_comment(self):
self.client.login(email='johnDoeEvent@example.com',
password='password')
data = {
'comment': '',
'event_id': self.event.id,
}
response = self.client.post(
reverse('events:add_comment'), data)
self.assertEqual(response.status_code, 200)
data = {
'comment': 'test comment event',
'event_id': self.event.id,
}
response = self.client.post(
reverse('events:add_comment'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:add_comment'), data)
self.assertEqual(response.status_code, 200)
class UpdateCommentTestCase(EventObjectTest, TestCase):
def test_event_update_comment(self):
self.client.login(email='johnDoeEvent@example.com',
password='password')
data = {
'commentid': self.comment.id,
'event_id': self.event.id,
'comment': ''
}
response = self.client.post(
reverse('events:edit_comment'), data)
self.assertEqual(response.status_code, 200)
data = {
'comment': 'test comment',
'commentid': self.comment.id,
'event_id': self.event.id,
}
response = self.client.post(
reverse('events:edit_comment'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:edit_comment'), data)
self.assertEqual(response.status_code, 200)
class DeleteCommentTestCase(EventObjectTest, TestCase):
def test_event_delete_comment(self):
data = {
'comment_id': self.comment.id,
}
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:remove_comment'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:remove_comment'), data)
self.assertEqual(response.status_code, 200)
class AddAttachmentTestCase(EventObjectTest, TestCase):
@pytest.mark.skip(reason="no way of currently testing this")
def test_event_add_attachment(self):
data = {
'attachment': SimpleUploadedFile('file_name.txt', bytes('file contents.', 'utf-8')),
'event_id': self.event.id
}
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:add_attachment'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:add_attachment'), data)
self.assertEqual(response.status_code, 200)
data = {
'attachment': '',
'event_id': self.event.id
}
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:add_attachment'), data)
self.assertEqual(response.status_code, 200)
class DeleteAttachmentTestCase(EventObjectTest, TestCase):
def test_event_delete_attachment(self):
data = {
'attachment_id': self.attachment.id,
}
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:remove_attachment'), data)
self.assertEqual(response.status_code, 200)
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.post(
reverse('events:remove_attachment'), data)
self.assertEqual(response.status_code, 200)
class EventDeleteTestCase(EventObjectTest, TestCase):
def test_events_delete(self):
self.client.login(email='janeDoeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:event_delete', args=(self.event.id,)))
self.assertEqual(response.status_code, 403)
self.client.login(email='johnDoeEvent@example.com',
password='password')
response = self.client.get(
reverse('events:event_delete', args=(self.event.id,)))
self.assertEqual(response.status_code, 302)
| 39.524706
| 128
| 0.592273
| 1,798
| 16,798
| 5.423804
| 0.081758
| 0.068704
| 0.073831
| 0.11895
| 0.832342
| 0.803733
| 0.795016
| 0.78589
| 0.77297
| 0.755845
| 0
| 0.012531
| 0.263662
| 16,798
| 424
| 129
| 39.617925
| 0.775891
| 0
| 0
| 0.644509
| 0
| 0
| 0.197178
| 0.046258
| 0
| 0
| 0
| 0
| 0.115607
| 1
| 0.031792
| false
| 0.086705
| 0.026012
| 0
| 0.089595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
73660fc02ccc1326fbe7a784d6f8131b7a698148
| 4,750
|
py
|
Python
|
test/test_helper.py
|
jayrshah98/GITS2.1-I.R.I.S
|
2891ba27b3309bbc7e8ff25ed221d3f1c78fb9d3
|
[
"MIT"
] | 1
|
2021-11-28T12:18:43.000Z
|
2021-11-28T12:18:43.000Z
|
test/test_helper.py
|
jayrshah98/GITS2.1-I.R.I.S
|
2891ba27b3309bbc7e8ff25ed221d3f1c78fb9d3
|
[
"MIT"
] | 20
|
2021-11-26T17:59:00.000Z
|
2022-01-29T10:44:15.000Z
|
test/test_helper.py
|
jayrshah98/GITS2.1-I.R.I.S
|
2891ba27b3309bbc7e8ff25ed221d3f1c78fb9d3
|
[
"MIT"
] | 3
|
2021-11-28T21:48:50.000Z
|
2022-01-05T15:44:06.000Z
|
# import os
# import sys
# sys.path.insert(1, os.getcwd())
# from helper import get_current_branch, get_repo_name
# from mock import patch, Mock
# @patch("subprocess.Popen")
# def test_get_repo_name_happy_case(mock_var):
# """
# Function to test fetching repo name and branch name, success case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('output'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_repo_name()
# test_result_username = test_result[0][15:]
# test_result_repo = test_result[1][:-4]
# # assert "hrushabhchouhan" == test_result_username, "Normal case"
# assert "testrepo" == test_result_repo, "Normal case"
# @patch("subprocess.Popen")
# def test_get_repo_name_happy_case_2(mock_var):
# """
# Function to test fetching repo name and user name, success case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('output'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_repo_name()
# assert type(test_result[0]) == str, "Normal case"
# assert type(test_result[1]) == str, "Normal case"
# @patch("subprocess.Popen")
# def test_get_repo_name_happy_case_3(mock_var):
# """
# Function to test fetching repo name and user name, success case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('output'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_repo_name()
# assert type(test_result) == list, "Normal case"
# @patch("subprocess.Popen")
# def test_get_repo_name_sad_case(mock_var):
# """
# Function to test fetching repo name and branch name, failure case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('output', 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_repo_name()
# if not test_result:
# assert False
# else:
# assert True
# @patch("subprocess.Popen")
# def test_get_current_branch_sad_case(mock_var):
# """
# Function to test fetching current branch, failure case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('output', 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_current_branch()
# if not test_result:
# assert True
# else:
# assert False
# @patch("subprocess.Popen")
# def test_get_trunk_branch_happy_case_master_branch(mock_var):
# """
# Function to test fetching main branch, success case when trunk branch='master'
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('master'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_current_branch()
# assert "master" == test_result, "Normal case"
# @patch("subprocess.Popen")
# def test_get_trunk_branch_happy_case_main_branch(mock_var):
# """
# Function to test fetching main branch, success case when trunk branch='main'
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('main'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_current_branch()
# assert "main" == test_result, "Normal case"
# @patch("subprocess.Popen")
# def test_get_trunk_branch_happy_case_other_branch(mock_var):
# """
# Function to test fetching main branch, success case when trunk branch is any other branch
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('branch'.encode('UTF-8'), 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_current_branch()
# assert "branch" == test_result, "Normal case"
# @patch("subprocess.Popen")
# def test_get_trunk_branch_sad_case(mock_var):
# """
# Function to test fetching main branch, failure case
# """
# mocked_pipe = Mock()
# attrs = {'communicate.return_value': ('branch', 'error'), 'returncode': 0}
# mocked_pipe.configure_mock(**attrs)
# mock_var.return_value = mocked_pipe
# test_result = get_current_branch()
# if not test_result:
# assert True
# else:
# assert False
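# All of the disabled tests above share one pattern: patch subprocess.Popen so
# that pipe.communicate() returns canned bytes. A self-contained sketch of that
# pattern using the standard library's unittest.mock instead of the external
# mock package (the git command is only an illustrative stand-in):
import subprocess
from unittest.mock import Mock, patch

with patch("subprocess.Popen") as mock_popen:
    pipe = Mock()
    pipe.configure_mock(**{"communicate.return_value": (b"main", b""),
                           "returncode": 0})
    mock_popen.return_value = pipe
    out, _ = subprocess.Popen(["git", "branch"]).communicate()
    assert out == b"main"  # canned output; no real git process is spawned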
| 31.879195
| 96
| 0.656
| 596
| 4,750
| 4.934564
| 0.11745
| 0.091806
| 0.033662
| 0.070384
| 0.867732
| 0.860592
| 0.850391
| 0.822509
| 0.800068
| 0.753485
| 0
| 0.0066
| 0.202526
| 4,750
| 148
| 97
| 32.094595
| 0.769799
| 0.938737
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7374c9778186578163fedca2094b253baab02913
| 5,084
|
py
|
Python
|
conans/test/unittests/client/tools/oss/cross_building_test.py
|
matthiasng/conan
|
634eadc319da928084633a344d42785edccb8d6c
|
[
"MIT"
] | 6,205
|
2015-12-01T13:40:05.000Z
|
2022-03-31T07:30:25.000Z
|
conans/test/unittests/client/tools/oss/cross_building_test.py
|
matthiasng/conan
|
634eadc319da928084633a344d42785edccb8d6c
|
[
"MIT"
] | 8,747
|
2015-12-01T16:28:48.000Z
|
2022-03-31T23:34:53.000Z
|
conans/test/unittests/client/tools/oss/cross_building_test.py
|
Mattlk13/conan
|
005fc53485557b0a570bb71670f2ca9c66082165
|
[
"MIT"
] | 961
|
2015-12-01T16:56:43.000Z
|
2022-03-31T13:50:52.000Z
|
# -*- coding: utf-8 -*-
import unittest
from mock import mock
from conans.client.conf import get_default_settings_yml
from conans.client.tools.oss import cross_building
from conans.model.settings import Settings
# TODO: Add tests using a conanfile with 'settings' and 'settings_build'
class CrossBuildingTest(unittest.TestCase):
def test_same(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "FreeBSD"
settings.arch = "x86_64"
self.assertFalse(cross_building(settings, self_os="FreeBSD", self_arch="x86_64"))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')),\
mock.patch("platform.machine", mock.MagicMock(return_value="x86_64")):
self.assertFalse(cross_building(settings))
settings.os_build = "FreeBSD"
settings.arch = "x86_64"
self.assertFalse(cross_building(settings))
def test_different_os(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "Linux"
settings.arch = "x86_64"
self.assertTrue(cross_building(settings, self_os="FreeBSD", self_arch="x86_64"))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')),\
mock.patch("platform.machine", mock.MagicMock(return_value="x86_64")):
self.assertTrue(cross_building(settings))
settings.os_build = "FreeBSD"
settings.arch_build = "x86_64"
self.assertTrue(cross_building(settings))
def test_different_arch(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "FreeBSD"
settings.arch = "x86"
self.assertTrue(cross_building(settings, self_os="FreeBSD", self_arch="x86_64"))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')), \
mock.patch("platform.machine", mock.MagicMock(return_value="x86_64")):
self.assertTrue(cross_building(settings))
settings.os_build = "FreeBSD"
settings.arch_build = "x86_64"
self.assertTrue(cross_building(settings))
def test_x64_x86(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "FreeBSD"
settings.arch = "x86"
self.assertFalse(cross_building(settings, self_os="FreeBSD",
self_arch="x86_64", skip_x64_x86=True))
self.assertTrue(cross_building(settings, self_os="FreeBSD",
self_arch="x86_64", skip_x64_x86=False))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')), \
mock.patch("platform.machine", mock.MagicMock(return_value="x86_64")):
self.assertFalse(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
settings.os_build = "FreeBSD"
settings.arch_build = "x86_64"
self.assertFalse(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
def test_x86_x64(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "FreeBSD"
settings.arch = "x86_64"
self.assertTrue(cross_building(settings, self_os="FreeBSD",
self_arch="x86", skip_x64_x86=True))
self.assertTrue(cross_building(settings, self_os="FreeBSD",
self_arch="x86", skip_x64_x86=False))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')), \
mock.patch("platform.machine", mock.MagicMock(return_value="x86")):
self.assertTrue(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
settings.os_build = "FreeBSD"
settings.arch_build = "x86"
self.assertTrue(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
def test_x86_64_different_os(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "Linux"
settings.arch = "x86"
self.assertTrue(cross_building(settings, self_os="FreeBSD",
self_arch="x86_64", skip_x64_x86=True))
self.assertTrue(cross_building(settings, self_os="FreeBSD",
self_arch="x86_64", skip_x64_x86=False))
with mock.patch("platform.system", mock.MagicMock(return_value='FreeBSD')), \
mock.patch("platform.machine", mock.MagicMock(return_value="x86_64")):
self.assertTrue(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
settings.os_build = "FreeBSD"
settings.arch_build = "x86_64"
self.assertTrue(cross_building(settings, skip_x64_x86=True))
self.assertTrue(cross_building(settings, skip_x64_x86=False))
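# A simplified sketch of the decision these tests exercise. The real
# conans.client.tools.oss.cross_building also consults settings_build and
# platform autodetection; this mirrors only the os/arch comparison under test,
# with self_os/self_arch playing the role of the build machine.
def cross_building_sketch(settings, self_os, self_arch, skip_x64_x86=False):
    if (skip_x64_x86 and self_os == settings.os
            and self_arch == "x86_64" and settings.arch == "x86"):
        return False  # 64-bit machine targeting 32-bit on the same OS
    return self_os != settings.os or self_arch != settings.arch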
| 44.99115
| 89
| 0.65952
| 598
| 5,084
| 5.339465
| 0.093645
| 0.113999
| 0.177576
| 0.177576
| 0.909803
| 0.909803
| 0.904792
| 0.904792
| 0.904792
| 0.900094
| 0
| 0.045362
| 0.22384
| 5,084
| 112
| 90
| 45.392857
| 0.763811
| 0.018096
| 0
| 0.793103
| 0
| 0
| 0.102626
| 0
| 0
| 0
| 0
| 0.008929
| 0.310345
| 1
| 0.068966
| false
| 0
| 0.057471
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
73a5d71ecd52823213fcaea65cda5166d3739d91
| 118
|
py
|
Python
|
7KYU/highest_value.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/highest_value.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/highest_value.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
def highest_value(a: str, b: str) -> str:
return a if sum(ord(i) for i in a) >= sum(ord(i) for i in b) else b
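# Two quick checks of the comparison above (not part of the original kata
# file): the winner is the string with the larger sum of character codes,
# and the >= sends ties to the first argument.
assert highest_value('b', 'aa') == 'aa'   # ord('b') == 98 < 97 + 97
assert highest_value('ab', 'ba') == 'ab'  # equal sums, first argument wins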
| 39.333333
| 71
| 0.584746
| 27
| 118
| 2.518519
| 0.518519
| 0.176471
| 0.205882
| 0.294118
| 0.382353
| 0.382353
| 0
| 0
| 0
| 0
| 0
| 0
| 0.262712
| 118
| 3
| 72
| 39.333333
| 0.781609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
73ce72fd5a28f4c6b9be09db423d61569fbd237c
| 3,848
|
py
|
Python
|
sleeper_analyzer/sleeper_api.py
|
Saciotto/Sleeper-football-fantasy-analyzer
|
f49d51c6286b7291a6253180f0522d99db820228
|
[
"MIT"
] | null | null | null |
sleeper_analyzer/sleeper_api.py
|
Saciotto/Sleeper-football-fantasy-analyzer
|
f49d51c6286b7291a6253180f0522d99db820228
|
[
"MIT"
] | null | null | null |
sleeper_analyzer/sleeper_api.py
|
Saciotto/Sleeper-football-fantasy-analyzer
|
f49d51c6286b7291a6253180f0522d99db820228
|
[
"MIT"
] | null | null | null |
import json
from urllib.request import Request, urlopen
class SleeperAPI:
def __init__(self, timeout=5):
self._timeout = timeout
def _get(self, url):
request = Request(url, method='GET')
with urlopen(request, timeout=self._timeout) as response:
return json.load(response)
def get_user(self, user_name):
url = 'https://api.sleeper.app/v1/user/{}'.format(user_name)
return self._get(url)
def get_all_leagues_for_user(self, user_id, season):
url = 'https://api.sleeper.app/v1/user/{}/leagues/nfl/{}'.format(user_id, season)
return self._get(url)
def get_league(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}'.format(league_id)
return self._get(url)
def get_rosters(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/rosters'.format(league_id)
return self._get(url)
def get_users_in_league(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/users'.format(league_id)
return self._get(url)
def get_league_matchups(self, league_id, week):
url = 'https://api.sleeper.app/v1/league/{}/matchups/{}'.format(league_id, week)
return self._get(url)
def get_playoffs_winners_brackets(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/winners_bracket'.format(league_id)
return self._get(url)
def get_playoffs_losers_brackets(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/loses_bracket'.format(league_id)
return self._get(url)
def get_transactions(self, league_id, week):
url = 'https://api.sleeper.app/v1/league/{}/transactions/{}'.format(league_id, week)
return self._get(url)
def get_traded_picks(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/traded_picks'.format(league_id)
return self._get(url)
def get_nfl_state(self):
url = 'https://api.sleeper.app/v1/state/nfl'
return self._get(url)
def get_drafts_for_user(self, user_id, season):
url = 'https://api.sleeper.app/v1/user/{}/drafts/nfl/{}'.format(user_id, season)
return self._get(url)
def get_drafts_for_league(self, league_id):
url = 'https://api.sleeper.app/v1/league/{}/drafts'.format(league_id)
return self._get(url)
def get_draft(self, draft_id):
url = 'https://api.sleeper.app/v1/draft/{}'.format(draft_id)
return self._get(url)
def get_draft_picks(self, draft_id):
url = 'https://api.sleeper.app/v1/draft/{}/picks'.format(draft_id)
return self._get(url)
def get_draft_traded_picks(self, draft_id):
url = 'https://api.sleeper.app/v1/draft/{}/traded_picks'.format(draft_id)
return self._get(url)
def get_all_players(self):
url = 'https://api.sleeper.app/v1/players/nfl'
return self._get(url)
def get_add_trending_players(self, lookback_hours=24, limit=25):
url = 'https://api.sleeper.app/v1/players/nfl/trending/add?lookback_hours={}&limit={}' \
.format(lookback_hours, limit)
return self._get(url)
def get_drop_trending_players(self, lookback_hours=24, limit=25):
url = 'https://api.sleeper.app/v1/players/nfl/trending/drop?lookback_hours={}&limit={}' \
.format(lookback_hours, limit)
return self._get(url)
def get_player_projections(self, player_id):
url = 'https://api.sleeper.app/projections/nfl/player/{}?season_type=regular&season=2021&grouping=week' \
.format(player_id)
return self._get(url)
def get_player_statistics(self, player_id):
url = 'https://api.sleeper.app/stats/nfl/player/{}?season_type=regular&season=2021&grouping=week' \
.format(player_id)
return self._get(url)
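# A usage sketch for the wrapper above. 'example_user' is a placeholder, the
# calls hit the live Sleeper API (network required), and the 'user_id',
# 'league_id' and 'name' keys follow the documented Sleeper response shape.
if __name__ == '__main__':
    api = SleeperAPI(timeout=10)
    user = api.get_user('example_user')
    for league in api.get_all_leagues_for_user(user['user_id'], 2021):
        print(league['league_id'], league['name'])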
| 38.09901
| 113
| 0.654886
| 539
| 3,848
| 4.450835
| 0.12987
| 0.055023
| 0.09629
| 0.157566
| 0.806586
| 0.806586
| 0.806586
| 0.743643
| 0.684869
| 0.609837
| 0
| 0.011624
| 0.195166
| 3,848
| 100
| 114
| 38.48
| 0.762996
| 0
| 0
| 0.328947
| 0
| 0.052632
| 0.283004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.302632
| false
| 0
| 0.026316
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
73ed22672796c1c749870704f272b9b21235d363
| 269
|
py
|
Python
|
electric_units/__init__.py
|
ample-tech/electric-units
|
da5aa44f9f2b25e558928de3e883199a4be5428b
|
[
"MIT"
] | 1
|
2020-05-19T15:34:17.000Z
|
2020-05-19T15:34:17.000Z
|
electric_units/__init__.py
|
ample-tech/electric-units
|
da5aa44f9f2b25e558928de3e883199a4be5428b
|
[
"MIT"
] | 6
|
2020-01-24T16:29:48.000Z
|
2020-02-26T15:52:47.000Z
|
electric_units/__init__.py
|
ample-tech/electric-units
|
da5aa44f9f2b25e558928de3e883199a4be5428b
|
[
"MIT"
] | 1
|
2020-02-07T14:40:47.000Z
|
2020-02-07T14:40:47.000Z
|
"""Package exports."""
from electric_units.electrical_energy import ElectricalEnergy
from electric_units.nem_settlement_period import NemSettlementPeriod
from electric_units.nem_dispatch_period import NemDispatchPeriod
from electric_units.watt_sample import WattSample
| 44.833333
| 68
| 0.892193
| 32
| 269
| 7.1875
| 0.5625
| 0.208696
| 0.295652
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066915
| 269
| 5
| 69
| 53.8
| 0.916335
| 0.05948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fb915f69069e9faf1fe6bb6e4c2d387be4a9069a
| 94
|
py
|
Python
|
simple_concurrency_with_amqp/task.py
|
allenling/simple-concurrency-with-amqp
|
2202c2793094e2d1abff63b11487457dadcbc6e6
|
[
"MIT"
] | null | null | null |
simple_concurrency_with_amqp/task.py
|
allenling/simple-concurrency-with-amqp
|
2202c2793094e2d1abff63b11487457dadcbc6e6
|
[
"MIT"
] | null | null | null |
simple_concurrency_with_amqp/task.py
|
allenling/simple-concurrency-with-amqp
|
2202c2793094e2d1abff63b11487457dadcbc6e6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
| 23.5
| 39
| 0.861702
| 13
| 94
| 5.461538
| 0.692308
| 0.28169
| 0.450704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.106383
| 94
| 3
| 40
| 31.333333
| 0.833333
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fbc014e34694b377fb898906d4325977faa66931
| 118
|
bzl
|
Python
|
debian/patch.bzl
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 39
|
2021-06-18T03:22:30.000Z
|
2022-03-21T15:23:43.000Z
|
debian/patch.bzl
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 10
|
2021-06-18T03:22:19.000Z
|
2022-03-18T22:14:15.000Z
|
debian/patch.bzl
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 4
|
2021-08-19T19:20:04.000Z
|
2022-03-08T07:33:18.000Z
|
files = {
"patch_2.7.5-1+deb8u1_amd64.deb": "5272a26273fd799ec1ec74db0e01df5883abbdf8b7e343ad28227295f660c35d",
}
| 29.5
| 105
| 0.79661
| 10
| 118
| 9.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0.084746
| 118
| 3
| 106
| 39.333333
| 0.407407
| 0
| 0
| 0
| 0
| 0
| 0.79661
| 0.79661
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbc2283a3f5a8fdbc85cdfadb5cd02969319dc18
| 175
|
py
|
Python
|
codeit/02_hash_function_remainer.py
|
ysong0504/algorithm
|
76bfef16bd5bdc13b7ad260ee5f4e9dc62316403
|
[
"MIT"
] | null | null | null |
codeit/02_hash_function_remainer.py
|
ysong0504/algorithm
|
76bfef16bd5bdc13b7ad260ee5f4e9dc62316403
|
[
"MIT"
] | null | null | null |
codeit/02_hash_function_remainer.py
|
ysong0504/algorithm
|
76bfef16bd5bdc13b7ad260ee5f4e9dc62316403
|
[
"MIT"
] | null | null | null |
def hash_function_remainder(key, array_size):
    # Use the division remainder to map key to a natural number in the range 0 ~ array_size - 1
    return key % array_size
print(hash_function_remainder(2509, 40))  # 2509 % 40 == 29
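# Remainder hashing is many-to-one: distinct keys can land in the same bucket,
# which a real hash table must resolve (e.g. by chaining). 2549 is a made-up
# second key for illustration.
assert hash_function_remainder(2509, 40) == hash_function_remainder(2549, 40) == 29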
| 35
| 58
| 0.742857
| 29
| 175
| 4.241379
| 0.724138
| 0.219512
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055944
| 0.182857
| 175
| 5
| 59
| 35
| 0.804196
| 0.297143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
fbdaf23ee284b8b69972704b24cd8cf1157841c4
| 97
|
py
|
Python
|
models/__init__.py
|
abidikhairi/iot_edge_classification
|
fcf2e8ef62dd9129bc6e15711e9b810a3a1faebc
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
abidikhairi/iot_edge_classification
|
fcf2e8ef62dd9129bc6e15711e9b810a3a1faebc
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
abidikhairi/iot_edge_classification
|
fcf2e8ef62dd9129bc6e15711e9b810a3a1faebc
|
[
"MIT"
] | null | null | null |
from models.baseline import GCN
from models.baseline import GAT
from models.baseline import SAGE
| 24.25
| 32
| 0.845361
| 15
| 97
| 5.466667
| 0.466667
| 0.365854
| 0.658537
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123711
| 97
| 3
| 33
| 32.333333
| 0.964706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
839b141270db8da981c7b9d94e24888873c388de
| 91
|
py
|
Python
|
biblepaycentral/settings/__init__.py
|
Lichtsucher/biblepaycentral
|
0575c9c6851bd87d35d8cda9840301ee510698e8
|
[
"MIT"
] | 3
|
2018-03-18T22:36:20.000Z
|
2020-02-13T15:52:25.000Z
|
biblepaycentral/settings/__init__.py
|
Lichtsucher/biblepaycentral
|
0575c9c6851bd87d35d8cda9840301ee510698e8
|
[
"MIT"
] | null | null | null |
biblepaycentral/settings/__init__.py
|
Lichtsucher/biblepaycentral
|
0575c9c6851bd87d35d8cda9840301ee510698e8
|
[
"MIT"
] | 1
|
2018-10-16T10:51:11.000Z
|
2018-10-16T10:51:11.000Z
|
from biblepaycentral.settings.generic import *
from biblepaycentral.settings.local import *
| 45.5
| 46
| 0.857143
| 10
| 91
| 7.8
| 0.6
| 0.487179
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 91
| 2
| 47
| 45.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
83dd6a09639b48c1cf42a7c014c0c71c8b9dff3f
| 6,854
|
py
|
Python
|
src/tests/featurization/expected/featurization_expected_mat.py
|
panpiort8/huggingmolecules-1
|
7caf9bb355db86a0d0e8423088c4328770b4db0d
|
[
"Apache-2.0"
] | 1
|
2021-11-04T03:06:08.000Z
|
2021-11-04T03:06:08.000Z
|
src/tests/featurization/expected/featurization_expected_mat.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | null | null | null |
src/tests/featurization/expected/featurization_expected_mat.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | null | null | null |
from huggingmolecules.featurization.featurization_mat import MatBatchEncoding, MatMoleculeEncoding
from numpy.ma import array
from torch import FloatTensor
expected_encoded_smiles = [
MatMoleculeEncoding(
node_features=array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]),
adjacency_matrix=array([[0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 1., 1., 1.],
[0., 0., 0., 1., 1.]]),
distance_matrix=array([[1.00000000e+06, 1.00000000e+06, 1.00000000e+06, 1.00000000e+06, 1.00000000e+06],
[1.00000000e+06, 0.00000000e+00, 1.49726296e+00, 2.46955618e+00, 3.85851039e+00],
[1.00000000e+06, 1.49726296e+00, 0.00000000e+00, 1.33899509e+00, 2.46955663e+00],
[1.00000000e+06, 2.46955618e+00, 1.33899509e+00, 0.00000000e+00, 1.49726289e+00],
[1.00000000e+06, 3.85851039e+00, 2.46955663e+00, 1.49726289e+00, 0.00000000e+00]]),
y=None),
MatMoleculeEncoding(
node_features=array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]),
adjacency_matrix=array([[0., 0., 0.],
[0., 1., 1.],
[0., 1., 1.]]),
distance_matrix=array([[1.00000000e+06, 1.00000000e+06, 1.00000000e+06],
[1.00000000e+06, 0.00000000e+00, 1.21945472e+00],
[1.00000000e+06, 1.21945472e+00, 0.00000000e+00]]),
y=None)]
expected_batch = MatBatchEncoding(
node_features=FloatTensor([[[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]],
[[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]]),
adjacency_matrix=FloatTensor([[[0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 1., 1., 1.],
[0., 0., 0., 1., 1.]],
[[0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]),
distance_matrix=FloatTensor([[[1.0000e+06, 1.0000e+06, 1.0000e+06, 1.0000e+06, 1.0000e+06],
[1.0000e+06, 0.0000e+00, 1.4973e+00, 2.4696e+00, 3.8585e+00],
[1.0000e+06, 1.4973e+00, 0.0000e+00, 1.3390e+00, 2.4696e+00],
[1.0000e+06, 2.4696e+00, 1.3390e+00, 0.0000e+00, 1.4973e+00],
[1.0000e+06, 3.8585e+00, 2.4696e+00, 1.4973e+00, 0.0000e+00]],
[[1.0000e+06, 1.0000e+06, 1.0000e+06, 0.0000e+00, 0.0000e+00],
[1.0000e+06, 0.0000e+00, 1.2195e+00, 0.0000e+00, 0.0000e+00],
[1.0000e+06, 1.2195e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00]]]),
y=None, batch_size=2)
| 77.886364
| 120
| 0.290487
| 1,045
| 6,854
| 1.891866
| 0.042105
| 0.593829
| 0.793627
| 0.92868
| 0.767324
| 0.705109
| 0.674254
| 0.657056
| 0.651998
| 0.617097
| 0
| 0.374358
| 0.432156
| 6,854
| 87
| 121
| 78.781609
| 0.133607
| 0
| 0
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.036585
| 0
| 0.036585
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83ed6b36c747d23473b8b08f3502258b8d539ec6
| 43
|
py
|
Python
|
tbn/population.py
|
RabidGuy/tbn
|
005abdadcaeccaf4a2737dc5c1b76870165bfa1a
|
[
"MIT"
] | null | null | null |
tbn/population.py
|
RabidGuy/tbn
|
005abdadcaeccaf4a2737dc5c1b76870165bfa1a
|
[
"MIT"
] | null | null | null |
tbn/population.py
|
RabidGuy/tbn
|
005abdadcaeccaf4a2737dc5c1b76870165bfa1a
|
[
"MIT"
] | null | null | null |
import member
class Population:
pass
| 7.166667
| 17
| 0.72093
| 5
| 43
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255814
| 43
| 5
| 18
| 8.6
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
83ff627f5833f3faf188eb17ee503a830dafff2d
| 12,632
|
py
|
Python
|
rvpvp/isa/rvv/vfmxxx_vv.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 5
|
2021-05-10T09:57:00.000Z
|
2021-10-05T14:39:20.000Z
|
rvpvp/isa/rvv/vfmxxx_vv.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | null | null | null |
rvpvp/isa/rvv/vfmxxx_vv.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 1
|
2021-05-14T20:24:11.000Z
|
2021-05-14T20:24:11.000Z
|
from ...isa.inst import *
import numpy as np
import ctypes
FE_TONEAREST = 0x0000
FE_DOWNWARD = 0x0400
FE_UPWARD = 0x0800
FE_TOWARDZERO = 0x0c00
libm = ctypes.CDLL('libm.so.6')
round_dict = { 0:FE_TONEAREST , 1:FE_TOWARDZERO , 2:FE_DOWNWARD , 3:FE_UPWARD }
INF_dict = {0: np.PINF, 1: np.NINF}
def muladd( a, b, c, l ):
result = np.zeros( l, dtype=a.dtype )
for no in range(0, l):
signProd = ( a[no] < 0 ) ^ ( b[no] < 0 )
if np.isnan( a[no] ) or np.isnan( b[no] ):
result[no] = np.nan
elif ( np.isinf(a[no]) and b[no] == 0 ) or ( a[no] == 0 and np.isinf(b[no]) ):
result[no] = np.nan
elif np.isinf(a[no] ) or np.isinf(b[no]):
if not np.isnan( c[no] ) and not np.isinf( c[no] ):
result[no] = INF_dict[signProd]
elif np.isnan( c[no] ):
result[no] = np.nan
elif ( c[no] < 0 ) == signProd:
result[no] = INF_dict[signProd]
else:
result[no] = np.nan
elif np.isnan( c[no] ):
result[no] = np.nan
elif np.isinf( c[no] ):
result[no] = c[no]
else:
result[no] = ( a[no].astype(np.float64) * b[no].astype(np.float64) + c[no] ).astype(result.dtype)
return result
class Vfmacc_vv(Inst):
name = 'vfmacc.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], self['vs2'][vstart:self['vl']], result[vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfnmacc_vv(Inst):
name = 'vfnmacc.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], - self['vs2'][vstart:self['vl']], - result[vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfmsac_vv(Inst):
name = 'vfmsac.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], self['vs2'][vstart:self['vl']], - result[vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfnmsac_vv(Inst):
name = 'vfnmsac.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], - self['vs2'][vstart:self['vl']], result[vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfmadd_vv(Inst):
name = 'vfmadd.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], result[vstart:self['vl']], self['vs2'][vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfnmadd_vv(Inst):
name = 'vfnmadd.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], - result[vstart:self['vl']], - self['vs2'][vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfmsub_vv(Inst):
name = 'vfmsub.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], result[vstart:self['vl']], - self['vs2'][vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
class Vfnmsub_vv(Inst):
name = 'vfnmsub.vv'
def golden(self):
if 'vs1' in self:
if 'frm' in self:
libm.fesetround(round_dict[self['frm']])
if 'orig' in self:
result = self['orig'].copy()
else:
result = self['vd'].copy()
if 'vstart' in self:
if self['vstart'] >= self['vl']:
return result
vstart = self['vstart']
else:
vstart = 0
value = muladd( self['vs1'][vstart:self['vl']], - result[vstart:self['vl']], self['vs2'][vstart:self['vl']], self['vl'] - vstart )
result[vstart:self['vl']] = self.masked( value, result[vstart:self['vl']] if 'orig' in self else 0, vstart )
if 'frm' in self:
if ( self['frm'] == 1 or self['frm'] == 2 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isposinf( result[vstart:self['vl']] ), 65504, result[vstart:self['vl']] )
if ( self['frm'] == 1 or self['frm'] == 3 )and result.dtype == np.float16:
result[vstart:self['vl']] = np.where( np.isneginf( result[vstart:self['vl']] ), -65504, result[vstart:self['vl']] )
libm.fesetround( 0 )
return result
else:
return 0
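# A quick sanity check of the IEEE-754 special cases handled by muladd above:
# inf * 0 is an invalid operation and must yield NaN, while an ordinary lane
# simply evaluates a*b + c. The inputs are made-up illustrative values.
_a = np.array([np.inf, 2.0], dtype=np.float32)
_b = np.array([0.0, 3.0], dtype=np.float32)
_c = np.array([1.0, 4.0], dtype=np.float32)
_r = muladd(_a, _b, _c, 2)
assert np.isnan(_r[0]) and _r[1] == 10.0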
| 35.886364
| 152
| 0.469759
| 1,505
| 12,632
| 3.924252
| 0.056478
| 0.176092
| 0.195056
| 0.219438
| 0.891974
| 0.88554
| 0.878429
| 0.878429
| 0.877243
| 0.877243
| 0
| 0.029419
| 0.364946
| 12,632
| 351
| 153
| 35.988604
| 0.706806
| 0
| 0
| 0.816
| 0
| 0
| 0.062465
| 0
| 0
| 0
| 0.0019
| 0
| 0
| 1
| 0.036
| false
| 0
| 0.012
| 0
| 0.212
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f7c5d069292bb93157b21cd0cc7603f64385c23f
| 24,349
|
py
|
Python
|
sdk/python/pulumi_azure/appservice/function_app.py
|
kenny-wealth/pulumi-azure
|
e57e3a81f95bf622e7429c53f0bff93e33372aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/appservice/function_app.py
|
kenny-wealth/pulumi-azure
|
e57e3a81f95bf622e7429c53f0bff93e33372aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/appservice/function_app.py
|
kenny-wealth/pulumi-azure
|
e57e3a81f95bf622e7429c53f0bff93e33372aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class FunctionApp(pulumi.CustomResource):
app_service_plan_id: pulumi.Output[str]
"""
The ID of the App Service Plan within which to create this Function App.
"""
app_settings: pulumi.Output[dict]
"""
A key-value pair of App Settings.
"""
auth_settings: pulumi.Output[dict]
"""
An `auth_settings` block as defined below.
* `activeDirectory` (`dict`)
* `allowedAudiences` (`list`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `additionalLoginParams` (`dict`)
* `allowedExternalRedirectUrls` (`list`)
* `defaultProvider` (`str`)
* `enabled` (`bool`) - Is the Function App enabled?
* `facebook` (`dict`)
* `app_id` (`str`)
* `appSecret` (`str`)
* `oauthScopes` (`list`)
* `google` (`dict`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `oauthScopes` (`list`)
* `issuer` (`str`)
* `microsoft` (`dict`)
* `client_id` (`str`)
* `client_secret` (`str`)
* `oauthScopes` (`list`)
* `runtimeVersion` (`str`)
* `tokenRefreshExtensionHours` (`float`)
* `tokenStoreEnabled` (`bool`)
* `twitter` (`dict`)
* `consumerKey` (`str`)
* `consumerSecret` (`str`)
* `unauthenticatedClientAction` (`str`)
"""
client_affinity_enabled: pulumi.Output[bool]
"""
Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
"""
connection_strings: pulumi.Output[list]
"""
A `connection_string` block as defined below.
* `name` (`str`) - The name of the Connection String.
* `type` (`str`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`str`) - The value for the Connection String.
"""
default_hostname: pulumi.Output[str]
"""
The default hostname associated with the Function App - such as `mysite.azurewebsites.net`
"""
enable_builtin_logging: pulumi.Output[bool]
"""
Should the built-in logging of this Function App be enabled? Defaults to `true`.
"""
enabled: pulumi.Output[bool]
"""
Is the Function App enabled?
"""
https_only: pulumi.Output[bool]
"""
Can the Function App only be accessed via HTTPS? Defaults to `false`.
"""
identity: pulumi.Output[dict]
"""
An `identity` block as defined below.
* `principalId` (`str`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`str`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`str`) - Specifies the identity type of the Function App. Possible values are `SystemAssigned`.
"""
kind: pulumi.Output[str]
"""
The Function App kind - such as `functionapp,linux,container`
"""
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
name: pulumi.Output[str]
"""
Specifies the name of the Function App. Changing this forces a new resource to be created.
"""
outbound_ip_addresses: pulumi.Output[str]
"""
A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12`
"""
possible_outbound_ip_addresses: pulumi.Output[str]
"""
A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12,52.143.43.17` - not all of which are necessarily in use. Superset of `outbound_ip_addresses`.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the Function App.
"""
site_config: pulumi.Output[dict]
"""
A `site_config` object as defined below.
* `alwaysOn` (`bool`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`dict`) - A `cors` block as defined below.
* `allowedOrigins` (`list`)
* `supportCredentials` (`bool`)
* `http2Enabled` (`bool`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`str`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `use32BitWorkerProcess` (`bool`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`str`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`bool`) - Should WebSockets be enabled?
"""
site_credential: pulumi.Output[dict]
"""
A `site_credential` block as defined below, which contains the site-level credentials used to publish to this App Service.
* `password` (`str`) - The password associated with the username, which can be used to publish to this App Service.
* `username` (`str`) - The username which can be used to publish to this App Service.
"""
storage_connection_string: pulumi.Output[str]
"""
The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
version: pulumi.Output[str]
"""
The runtime version associated with the Function App. Defaults to `~1`.
"""
def __init__(__self__, resource_name, opts=None, app_service_plan_id=None, app_settings=None, auth_settings=None, client_affinity_enabled=None, connection_strings=None, enable_builtin_logging=None, enabled=None, https_only=None, identity=None, location=None, name=None, resource_group_name=None, site_config=None, storage_connection_string=None, tags=None, version=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Function App.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_plan_id: The ID of the App Service Plan within which to create this Function App.
:param pulumi.Input[dict] app_settings: A map of key-value pairs of App Settings.
:param pulumi.Input[dict] auth_settings: An `auth_settings` block as defined below.
:param pulumi.Input[bool] client_affinity_enabled: Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
:param pulumi.Input[list] connection_strings: A `connection_string` block as defined below.
:param pulumi.Input[bool] enable_builtin_logging: Should the built-in logging of this Function App be enabled? Defaults to `true`.
:param pulumi.Input[bool] enabled: Is the Function App enabled?
:param pulumi.Input[bool] https_only: Can the Function App only be accessed via HTTPS? Defaults to `false`.
:param pulumi.Input[dict] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Function App. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Function App.
:param pulumi.Input[dict] site_config: A `site_config` object as defined below.
:param pulumi.Input[str] storage_connection_string: The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: The runtime version associated with the Function App. Defaults to `~1`.
The **auth_settings** object supports the following:
* `activeDirectory` (`pulumi.Input[dict]`)
* `allowedAudiences` (`pulumi.Input[list]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `additionalLoginParams` (`pulumi.Input[dict]`)
* `allowedExternalRedirectUrls` (`pulumi.Input[list]`)
* `defaultProvider` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Is the Function App enabled?
* `facebook` (`pulumi.Input[dict]`)
* `app_id` (`pulumi.Input[str]`)
* `appSecret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `google` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `issuer` (`pulumi.Input[str]`)
* `microsoft` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `runtimeVersion` (`pulumi.Input[str]`)
* `tokenRefreshExtensionHours` (`pulumi.Input[float]`)
* `tokenStoreEnabled` (`pulumi.Input[bool]`)
* `twitter` (`pulumi.Input[dict]`)
* `consumerKey` (`pulumi.Input[str]`)
* `consumerSecret` (`pulumi.Input[str]`)
* `unauthenticatedClientAction` (`pulumi.Input[str]`)
The **connection_strings** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Connection String.
* `type` (`pulumi.Input[str]`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`pulumi.Input[str]`) - The value for the Connection String.
The **identity** object supports the following:
* `principalId` (`pulumi.Input[str]`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`pulumi.Input[str]`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`pulumi.Input[str]`) - Specifies the identity type of the Function App. Possible values are `SystemAssigned`.
The **site_config** object supports the following:
* `alwaysOn` (`pulumi.Input[bool]`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`pulumi.Input[dict]`) - A `cors` block as defined below.
* `allowedOrigins` (`pulumi.Input[list]`)
* `supportCredentials` (`pulumi.Input[bool]`)
* `http2Enabled` (`pulumi.Input[bool]`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`pulumi.Input[str]`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `use32BitWorkerProcess` (`pulumi.Input[bool]`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`pulumi.Input[str]`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`pulumi.Input[bool]`) - Should WebSockets be enabled?
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/function_app.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if app_service_plan_id is None:
raise TypeError("Missing required property 'app_service_plan_id'")
__props__['app_service_plan_id'] = app_service_plan_id
__props__['app_settings'] = app_settings
__props__['auth_settings'] = auth_settings
__props__['client_affinity_enabled'] = client_affinity_enabled
__props__['connection_strings'] = connection_strings
__props__['enable_builtin_logging'] = enable_builtin_logging
__props__['enabled'] = enabled
__props__['https_only'] = https_only
__props__['identity'] = identity
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['site_config'] = site_config
if storage_connection_string is None:
raise TypeError("Missing required property 'storage_connection_string'")
__props__['storage_connection_string'] = storage_connection_string
__props__['tags'] = tags
__props__['version'] = version
__props__['default_hostname'] = None
__props__['kind'] = None
__props__['outbound_ip_addresses'] = None
__props__['possible_outbound_ip_addresses'] = None
__props__['site_credential'] = None
super(FunctionApp, __self__).__init__(
'azure:appservice/functionApp:FunctionApp',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, app_service_plan_id=None, app_settings=None, auth_settings=None, client_affinity_enabled=None, connection_strings=None, default_hostname=None, enable_builtin_logging=None, enabled=None, https_only=None, identity=None, kind=None, location=None, name=None, outbound_ip_addresses=None, possible_outbound_ip_addresses=None, resource_group_name=None, site_config=None, site_credential=None, storage_connection_string=None, tags=None, version=None):
"""
Get an existing FunctionApp resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_plan_id: The ID of the App Service Plan within which to create this Function App.
:param pulumi.Input[dict] app_settings: A map of key-value pairs of App Settings.
:param pulumi.Input[dict] auth_settings: An `auth_settings` block as defined below.
:param pulumi.Input[bool] client_affinity_enabled: Should the Function App send session affinity cookies, which route client requests in the same session to the same instance?
:param pulumi.Input[list] connection_strings: A `connection_string` block as defined below.
:param pulumi.Input[str] default_hostname: The default hostname associated with the Function App - such as `mysite.azurewebsites.net`
:param pulumi.Input[bool] enable_builtin_logging: Should the built-in logging of this Function App be enabled? Defaults to `true`.
:param pulumi.Input[bool] enabled: Is the Function App enabled?
:param pulumi.Input[bool] https_only: Can the Function App only be accessed via HTTPS? Defaults to `false`.
:param pulumi.Input[dict] identity: An `identity` block as defined below.
:param pulumi.Input[str] kind: The Function App kind - such as `functionapp,linux,container`
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Function App. Changing this forces a new resource to be created.
:param pulumi.Input[str] outbound_ip_addresses: A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12`
:param pulumi.Input[str] possible_outbound_ip_addresses: A comma separated list of outbound IP addresses - such as `52.23.25.3,52.143.43.12,52.143.43.17` - not all of which are necessarily in use. Superset of `outbound_ip_addresses`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Function App.
:param pulumi.Input[dict] site_config: A `site_config` object as defined below.
:param pulumi.Input[dict] site_credential: A `site_credential` block as defined below, which contains the site-level credentials used to publish to this App Service.
:param pulumi.Input[str] storage_connection_string: The connection string of the backend storage account which will be used by this Function App (such as the dashboard, logs).
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: The runtime version associated with the Function App. Defaults to `~1`.
The **auth_settings** object supports the following:
* `activeDirectory` (`pulumi.Input[dict]`)
* `allowedAudiences` (`pulumi.Input[list]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `additionalLoginParams` (`pulumi.Input[dict]`)
* `allowedExternalRedirectUrls` (`pulumi.Input[list]`)
* `defaultProvider` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Is the Function App enabled?
* `facebook` (`pulumi.Input[dict]`)
* `app_id` (`pulumi.Input[str]`)
* `appSecret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `google` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `issuer` (`pulumi.Input[str]`)
* `microsoft` (`pulumi.Input[dict]`)
* `client_id` (`pulumi.Input[str]`)
* `client_secret` (`pulumi.Input[str]`)
* `oauthScopes` (`pulumi.Input[list]`)
* `runtimeVersion` (`pulumi.Input[str]`)
* `tokenRefreshExtensionHours` (`pulumi.Input[float]`)
* `tokenStoreEnabled` (`pulumi.Input[bool]`)
* `twitter` (`pulumi.Input[dict]`)
* `consumerKey` (`pulumi.Input[str]`)
* `consumerSecret` (`pulumi.Input[str]`)
* `unauthenticatedClientAction` (`pulumi.Input[str]`)
The **connection_strings** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Connection String.
* `type` (`pulumi.Input[str]`) - The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`.
* `value` (`pulumi.Input[str]`) - The value for the Connection String.
The **identity** object supports the following:
* `principalId` (`pulumi.Input[str]`) - The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `tenantId` (`pulumi.Input[str]`) - The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service.
* `type` (`pulumi.Input[str]`) - Specifies the identity type of the Function App. Possible values are `SystemAssigned`.
The **site_config** object supports the following:
* `alwaysOn` (`pulumi.Input[bool]`) - Should the Function App be loaded at all times? Defaults to `false`.
* `cors` (`pulumi.Input[dict]`) - A `cors` block as defined below.
* `allowedOrigins` (`pulumi.Input[list]`)
* `supportCredentials` (`pulumi.Input[bool]`)
* `http2Enabled` (`pulumi.Input[bool]`) - Specifies whether or not the http2 protocol should be enabled. Defaults to `false`.
* `linuxFxVersion` (`pulumi.Input[str]`) - Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`.
* `use32BitWorkerProcess` (`pulumi.Input[bool]`) - Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to `true`.
* `virtualNetworkName` (`pulumi.Input[str]`) - The name of the Virtual Network which this App Service should be attached to.
* `websocketsEnabled` (`pulumi.Input[bool]`) - Should WebSockets be enabled?
The **site_credential** object supports the following:
* `password` (`pulumi.Input[str]`) - The password associated with the username, which can be used to publish to this App Service.
* `username` (`pulumi.Input[str]`) - The username which can be used to publish to this App Service.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/function_app.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["app_service_plan_id"] = app_service_plan_id
__props__["app_settings"] = app_settings
__props__["auth_settings"] = auth_settings
__props__["client_affinity_enabled"] = client_affinity_enabled
__props__["connection_strings"] = connection_strings
__props__["default_hostname"] = default_hostname
__props__["enable_builtin_logging"] = enable_builtin_logging
__props__["enabled"] = enabled
__props__["https_only"] = https_only
__props__["identity"] = identity
__props__["kind"] = kind
__props__["location"] = location
__props__["name"] = name
__props__["outbound_ip_addresses"] = outbound_ip_addresses
__props__["possible_outbound_ip_addresses"] = possible_outbound_ip_addresses
__props__["resource_group_name"] = resource_group_name
__props__["site_config"] = site_config
__props__["site_credential"] = site_credential
__props__["storage_connection_string"] = storage_connection_string
__props__["tags"] = tags
__props__["version"] = version
return FunctionApp(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
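# A minimal usage sketch (illustrative only; `example_group`, `example_plan`,
# and `example_account` are hypothetical resources assumed to be defined
# elsewhere in the Pulumi program):
#
#   import pulumi_azure as azure
#
#   app = azure.appservice.FunctionApp("example-function-app",
#       location=example_group.location,
#       resource_group_name=example_group.name,
#       app_service_plan_id=example_plan.id,
#       storage_connection_string=example_account.primary_connection_string)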
| 55.718535
| 485
| 0.65284
| 2,871
| 24,349
| 5.357367
| 0.104145
| 0.089396
| 0.056433
| 0.019895
| 0.837267
| 0.810806
| 0.795917
| 0.773877
| 0.756453
| 0.735453
| 0
| 0.005909
| 0.235451
| 24,349
| 436
| 486
| 55.84633
| 0.820316
| 0.488275
| 0
| 0.018692
| 1
| 0
| 0.168133
| 0.057185
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037383
| false
| 0.009346
| 0.056075
| 0.018692
| 0.327103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7fbbfc00c62087ff680b4e49b6e3bdf810186d5
| 6,625
|
py
|
Python
|
ck/repo/module/user/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 37
|
2015-02-04T16:03:33.000Z
|
2021-01-03T13:24:29.000Z
|
ck/repo/module/user/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 3
|
2018-10-25T13:03:05.000Z
|
2018-11-13T05:14:57.000Z
|
ck/repo/module/user/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 4
|
2017-11-21T01:39:41.000Z
|
2020-08-09T19:22:43.000Z
|
#
# Collective Knowledge (web-based user auth)
#
#
#
#
# Developer:
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# authentication of a user
def auth(i):
"""
Input: {
(web_vars_post)
(web_vars_get)
(web_vars_session)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
user_id
session_id
}
"""
import re
import hashlib
rr={'return':0}
wvp=i.get('web_vars_post',{})
# Check username
username=wvp.get('username','').strip()
if username=='':
return {'return':1, 'error':'username is empty'}
if not re.match("^[A-Za-z0-9.@_-]*$", username):
return {'return':1, 'error':'username contains forbidden characters'}
# Check password
password=wvp.get('password','').strip()
if password=='':
return {'return':1, 'error':'password is empty'}
password_md5=hashlib.md5(password.encode('utf8')).hexdigest()
# Check if entry exists
default_repo=ck.cfg.get('default_repo_to_write','')
if default_repo=='': default_repo='local'
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'repo_uoa':default_repo,
'data_uoa':username})
if r['return']>0:
if r['return']==16: return {'return':16, 'error':'Username not registered'}
return r
d=r['dict']
load_password_md5=d.get('password_md5','')
if password_md5!=load_password_md5:
return {'return':1, 'error':'password did not match'}
# Generate random token for the session
r=ck.gen_uid({})
if r['return']>0: return r
rr['session_id']=r['data_uid']
rr['user_id']=username
return rr
##############################################################################
# create account
def create(i):
"""
Input: {
(web_vars_post)
(web_vars_get)
(web_vars_session)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
user_id
session_id
}
"""
import re
import hashlib
rr={'return':0}
wvp=i.get('web_vars_post',{})
# Check username
username=wvp.get('username','').strip()
if username=='':
return {'return':1, 'error':'username is empty'}
if not re.match("^[A-Za-z0-9.@_-]*$", username):
return {'return':1, 'error':'username contains forbidden characters'}
# Check password
password=wvp.get('password','').strip()
if password=='':
return {'return':1, 'error':'password is empty'}
password_md5=hashlib.md5(password.encode('utf8')).hexdigest()
# Check email
email=wvp.get('email','').strip()
if email=='':
return {'return':1, 'error':'email is empty'}
realname=wvp.get('realname','').strip()
# Check if entry exists
default_repo=ck.cfg.get('default_repo_to_write','')
if default_repo=='': default_repo='local'
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'repo_uoa':default_repo,
'data_uoa':username})
if r['return']==0:
return {'return':32, 'error':'Username already registered'}
if r['return']!=16: return r
# Create entry
d={'password_md5':password_md5,
'email':email,
'realname':realname}
r=ck.access({'action':'add',
'module_uoa':work['self_module_uid'],
'repo_uoa':default_repo,
'data_uoa':username,
'dict':d,
'sort_key':'yes'})
if r['return']>0: return r
# Generate random token for the session
r=ck.gen_uid({})
if r['return']>0: return r
rr['session_id']=r['data_uid']
rr['user_id']=username
return rr
##############################################################################
# renew user
def renew(i):
"""
Input: {
(web_vars_post)
(web_vars_get)
(web_vars_session)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
user_id
session_id
}
"""
import re
import hashlib
rr={'return':0}
wvp=i.get('web_vars_post',{})
# Check username
username=wvp.get('username','').strip()
if username=='':
return {'return':1, 'error':'username is empty'}
if not re.match("^[A-Za-z0-9.@_-]*$", username):
return {'return':1, 'error':'username contains forbidden characters'}
# Load user
default_repo=ck.cfg.get('default_repo_to_write','')
if default_repo=='': default_repo='local'
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'repo_uoa':default_repo,
'data_uoa':username})
if r['return']>0: return r
d=r['dict']
# Check password
password=wvp.get('password','').strip()
if password!='':
password_md5=hashlib.md5(password.encode('utf8')).hexdigest()
d['password_md5']=password_md5
# Check email
email=wvp.get('email','').strip()
if email!='':
d['email']=email
realname=wvp.get('realname','').strip()
if realname!='':
d['realname']=realname
# Update entry
r=ck.access({'action':'update',
'module_uoa':work['self_module_uid'],
'repo_uoa':default_repo,
'data_uoa':username,
'dict':d,
'substitute':'yes',
'sort_key':'yes'})
if r['return']>0: return r
return {'return':0}
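# A minimal invocation sketch (illustrative only; assumes an initialized CK
# kernel so that the module-level `ck` and `work` variables are populated):
#
#   r = create({'web_vars_post': {'username': 'alice',
#                                 'password': 'secret',
#                                 'email': 'alice@example.org'}})
#   if r['return'] > 0: print(r['error'])
#   else: print('new session:', r['session_id'])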
| 25
| 82
| 0.497358
| 729
| 6,625
| 4.377229
| 0.146776
| 0.06769
| 0.04074
| 0.056409
| 0.821999
| 0.749922
| 0.749608
| 0.749608
| 0.734566
| 0.679724
| 0
| 0.014211
| 0.309585
| 6,625
| 264
| 83
| 25.094697
| 0.683428
| 0.257057
| 0
| 0.741379
| 0
| 0
| 0.26983
| 0.014443
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0.137931
| 0.051724
| 0
| 0.224138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
f7fd8222b161847ddcd7f5749f69a02e23c18396
| 30,040
|
py
|
Python
|
src/onecontainer_api/routers/tests/test_media.py
|
intel/stacks-api
|
904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87
|
[
"BSD-3-Clause"
] | 4
|
2020-12-08T19:06:41.000Z
|
2021-08-13T09:32:21.000Z
|
src/onecontainer_api/routers/tests/test_media.py
|
intel/stacks-api
|
904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87
|
[
"BSD-3-Clause"
] | 2
|
2020-12-15T20:35:39.000Z
|
2021-01-05T17:37:12.000Z
|
src/onecontainer_api/routers/tests/test_media.py
|
intel/stacks-api
|
904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87
|
[
"BSD-3-Clause"
] | 4
|
2020-12-04T20:39:23.000Z
|
2021-01-04T10:26:33.000Z
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Intel Corporation
import os
import time
from fastapi.testclient import TestClient
from onecontainer_api import models, schemas, config, startup_svc
from onecontainer_api.frontend import app
web_server_port = 80
rtmp_server_port = 1935
for svc in config.INITIAL_SERVICES:
if svc["image"] == "web-rtmp":
web_server_port = svc["port"]["80/tcp"]
rtmp_server_port = svc["port"]["1935/tcp"]
break
video_0 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/fruit-and-vegetable-detection.mp4"
video_1 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/bottle-detection.mp4"
video_2 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/face-demographics-walking.mp4"
rtmp_ip = f"{config.BACKEND_NETWORK_GATEWAY}:{rtmp_server_port}"
input_data = {
"source": video_0
}
probe_input = {'streams': [{'index': 0, 'codec_name': 'h264', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10', 'profile': 'High', 'codec_type': 'video', 'codec_time_base': '1001/120000', 'codec_tag_string': 'avc1', 'codec_tag': '0x31637661', 'width': 960, 'height': 540, 'coded_width': 960, 'coded_height': 544, 'closed_captions': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': 32, 'color_range': 'tv', 'color_space': 'bt709', 'color_transfer': 'bt709', 'color_primaries': 'bt709', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'is_avc': 'true', 'nal_length_size': '4', 'r_frame_rate': '60000/1001', 'avg_frame_rate': '60000/1001', 'time_base': '1/60000', 'start_pts': 0, 'start_time': '0.000000', 'duration_ts': 3636633, 'duration': '60.610550', 'bit_rate': '2335818', 'bits_per_raw_sample': '8', 'nb_frames': '3633', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0}, 'tags': {'creation_time': '2018-06-15T21:05:12.000000Z', 'language': 'und', 'handler_name': 'Core Media Video'}}], 'format': {'filename': 'http://172.17.0.1:5553/sample-videos/fruit-and-vegetable-detection.mp4', 'nb_streams': 1, 'nb_programs': 0, 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'format_long_name': 'QuickTime / MOV', 'start_time': '0.000000', 'duration': '60.610550', 'size': '17760065', 'bit_rate': '2344154', 'probe_score': 100, 'tags': {'major_brand': 'mp42', 'minor_version': '1', 'compatible_brands': 'mp41mp42isom', 'creation_time': '2018-06-15T21:05:12.000000Z'}}}
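# probe_input is the expected ffprobe-style JSON for video_0; test_probe below
# compares the /media/{svc_id}/probe response against it verbatim.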
supported_containers = ["mkv", "mp4", "mov", "m4a", "avi", "webm", "wmv", "vob"]
supported_audio_codecs = {
"aac": "aac",
"ogg": "libvorbis",
"wav": "pcm_s16le",
"flac": "flac",
"ac3": "ac3",
"wma": "wmav2",
}
supported_gpu_codecs = {
"mp4": "h264_vaapi",
"mkv": "hevc_vaapi",
"mov": "mjpeg_vaapi",
"webm": "vp8_vaapi"
}
pipeline_codecs = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "libx264"
}
]
}
]
}
pipeline_h264 = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "ultrafast",
"tune": "film",
"crf": "30"
}
}
]
}
]
}
pipeline_mpegts = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "mpegts",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_rtmp = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "flv",
"rtmp_ip": rtmp_ip,
"rtmp_path": "live",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_filters = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"filters": {
"scale": {
"w": "iw/2",
"h": -1
},
"deflicker": {
"mode": "pm",
"size": 10
},
"reverse": {},
"hue": {
"s": 0
}
}
},
{
"stream_type": "audio",
"filters": {
"atrim": {
"start": 1
},
"asetpts": "PTS-STARTPTS",
"volume": {
"volume": 0.8
},
"areverse": {},
"aphaser": {}
}
}
]
}
]
}
pipeline_copy = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "copy"
},
{
"stream_type": "audio",
"codec": "copy"
}
]
}
]
}
pipeline_empty = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4"
}
]
}
pipeline_mkv = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"params": {
"metadata": "stereo_mode=left_right",
"default_mode": "infer_no_subs"
}
}
]
}
pipeline_mp4 = {
"input_file": {
"source":video_1
},
"outputs": [
{
"container": "mp4",
"params": {
"movflags": "isml+frag_keyframe"
}
}
]
}
pipeline_aac = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "aac",
"channels": [
{
"stream_type": "audio",
"codec": "aac",
"codec_params": {
"ab": 192000,
"profile": "aac_ltp",
"strict": "-2",
}
},
{
"stream_type": "video",
"params": {
"vn": None
}
}
]
}
]
}
class TestMedia():
def setup_method(self):
models.Base.metadata.create_all(bind=models.engine)
def teardown_method(self):
os.remove(config.DATABASE_URL.split("///")[1])
def test_probe(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json=input_data)
assert response.status_code == 200
assert response.json() == probe_input
def test_probe_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={})
assert response.status_code == 400
assert response.json().get("status") == "InputFile field required: source"
def test_probe_wrong_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": "wrong"})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == ["wrong: No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": ""})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == [": No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": None})
assert response.status_code == 400
assert response.json().get("status") == "InputFile none is not an allowed value: source"
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": 1})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == ["1: No such file or directory"]
def test_pipeline_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"] = [{}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,container"
json_data["outputs"][0] = {"container": "test", "channels": [{}]}
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,channels,0,stream_type"
json_data["outputs"] = []
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == "No outputs specified"
json_data.pop("input_file")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: input_file"
def test_pipeline_unsupported_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"][0]["container"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == f"{output.get('id')}.wrong: Invalid argument"
json_data["outputs"][0]["container"] = "mkv"
json_data["outputs"][0]["channels"][0]["codec"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == "Unknown encoder 'wrong'"
json_data["outputs"][0]["channels"][0]["codec"] = "libx264"
json_data["outputs"][0]["channels"][0]["stream_type"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v {outputs[index]}"
def test_pipeline_copy(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_copy)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec copy -vcodec copy {outputs[index]}"
def test_pipeline_empty(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_empty)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
def test_pipeline_mkv(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mkv)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -default_mode infer_no_subs -metadata stereo_mode=left_right {outputs[index]}"
def test_pipeline_mp4(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mp4)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -movflags isml+frag_keyframe {outputs[index]}"
def test_pipeline_aac(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_aac)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -ab 192000 -acodec aac -profile:a aac_ltp -strict -2 -vn {outputs[index]}"
def test_pipeline_h264(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_h264)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -crf 30 -preset ultrafast -tune film -vcodec libx264 {outputs[index]}"
def test_pipeline_filters(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_filters)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(5)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_2} -filter_complex [0:v]scale=h=-1:w=iw/2[s0];[s0]deflicker=mode=pm:size=10[s1];[s1]reverse[s2];[s2]hue=s=0[s3];[0:a]atrim=start=1[s4];[s4]asetpts=PTS-STARTPTS[s5];[s5]volume=volume=0.8[s6];[s6]areverse[s7];[s7]aphaser[s8] -map [s3] -map [s8] {outputs[index]}"
def test_pipeline_supported_containers(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for container in supported_containers:
json_data["outputs"][0]["container"] = container
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
finished = True
timeout -= 1
assert finished, "pipeline did not finish before timeout"
def test_pipeline_supported_audio_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_audio_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["channels"] = [{"stream_type": "audio", "codec": codec}, {"stream_type": "video", "params": {"vn": None}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec {codec} -vn {outputs[index]}"
finished = True
timeout -= 1
assert finished, "pipeline did not finish before timeout"
def test_pipeline_supported_gpu_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_gpu_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["params"] = {"vaapi_device": "/dev/dri/renderD128"}
json_data["outputs"][0]["channels"] = [{"stream_type": "video", "codec": codec, "params": {"vf":"format=nv12,hwupload"}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -vaapi_device /dev/dri/renderD128 -vcodec {codec} -vf format=nv12,hwupload {outputs[index]}"
finished = True
timeout -= 1
assert finished, "pipeline did not finish before timeout"
def test_pipeline_ttl(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["ttl"] = 5
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
result = response.json()
time.sleep(6)
response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == f"Pipeline {result['id']} doesn't exist"
def test_pipeline_azure_upload(self):
ks = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
bucket = os.getenv("CLOUD_STORAGE_BUCKET")
if ks and bucket:
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["outputs"][0]["storage"] = [{
"name": "azure",
"bucket": bucket,
"env": {
"AZURE_STORAGE_CONNECTION_STRING": ks
}
}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
# response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
def test_pipeline_mpegts(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_stop(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(2)
response = client.delete(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_rtmp(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_rtmp)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert outputs[index] == f"rtmp://{rtmp_ip}/live"
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f flv -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
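# The suite can be run with pytest (it assumes the mers-ffmpeg service and the
# web-rtmp container from config.INITIAL_SERVICES are already up and reachable):
#   pytest src/onecontainer_api/routers/tests/test_media.py -v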
| 43.854015
| 1,723
| 0.54737
| 3,423
| 30,040
| 4.666667
| 0.115396
| 0.060849
| 0.027607
| 0.033742
| 0.801114
| 0.762113
| 0.752974
| 0.73532
| 0.711406
| 0.708088
| 0
| 0.030702
| 0.291977
| 30,040
| 685
| 1,724
| 43.854015
| 0.720344
| 0.005027
| 0
| 0.583725
| 0
| 0.023474
| 0.273104
| 0.08064
| 0
| 0
| 0.000335
| 0
| 0.158059
| 1
| 0.034429
| false
| 0
| 0.007825
| 0
| 0.043818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7928960c4523bf9c3997eb56cd22e19cbb01eb65
| 40
|
py
|
Python
|
001113StepikPyGEK/StepikPyGEK001113сh01p04st10C03_20200407.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001113StepikPyGEK/StepikPyGEK001113сh01p04st10C03_20200407.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001113StepikPyGEK/StepikPyGEK001113сh01p04st10C03_20200407.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
print((1.6 * 10 ** 2) / (4 * 10 ** -2))
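# 1.6 * 10 ** 2 is 160.0 and 4 * 10 ** -2 is 0.04, so this prints 4000.0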
| 20
| 39
| 0.35
| 8
| 40
| 1.75
| 0.75
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.310345
| 0.275
| 40
| 1
| 40
| 40
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
f7178a80bd5a4eb44808ea505c773373aa6ce545
| 102
|
py
|
Python
|
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | 2
|
2021-06-07T10:01:32.000Z
|
2021-06-07T10:10:18.000Z
|
example.py
|
CJSmekens/code-refinery-bq-1
|
12cf67033e82176da665156037204eb405a0f6a7
|
[
"Apache-2.0"
] | 1
|
2021-06-07T09:53:19.000Z
|
2021-06-07T09:53:19.000Z
|
def add(a, b):
return a + b
def subtract(a, b):
return a - b
def product(a, b):
return a * b
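# Example usage (illustrative, not part of the original file):
#   print(add(2, 3))       # 5
#   print(subtract(2, 3))  # -1
#   print(product(2, 3))   # 6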
| 11.333333
| 18
| 0.558824
| 21
| 102
| 2.714286
| 0.333333
| 0.210526
| 0.421053
| 0.473684
| 0.631579
| 0.45614
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 102
| 8
| 19
| 12.75
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
f72779dd707f715614f9cebb5334c248393f7004
| 90
|
py
|
Python
|
03_operator/03_operator.py
|
Jasper-Li/PythonTutorial
|
270d18a1830ae74eaa6fc797a6d4f672688f1cee
|
[
"CC0-1.0"
] | null | null | null |
03_operator/03_operator.py
|
Jasper-Li/PythonTutorial
|
270d18a1830ae74eaa6fc797a6d4f672688f1cee
|
[
"CC0-1.0"
] | null | null | null |
03_operator/03_operator.py
|
Jasper-Li/PythonTutorial
|
270d18a1830ae74eaa6fc797a6d4f672688f1cee
|
[
"CC0-1.0"
] | null | null | null |
# 03 operator
print(4+3)
print(4-3)
print(4*3)
print(4 ** 3) # exponentiation - this one wasn't taught in primary school
print(4/3)
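# Expected output, line by line: 7, 1, 12, 64, 1.3333333333333333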
| 11.25
| 24
| 0.566667
| 18
| 90
| 2.833333
| 0.333333
| 0.588235
| 0.686275
| 0.705882
| 0.54902
| 0.54902
| 0.54902
| 0.54902
| 0.54902
| 0
| 0
| 0.173913
| 0.233333
| 90
| 7
| 25
| 12.857143
| 0.565217
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
f772e92eb3dfe07f3382da9292b0260d436b98eb
| 5,967
|
py
|
Python
|
tests/test_yes_com.py
|
fabian-hk/dnssec_scanner
|
437ca39bb84e592b81f146c9e711c93687a43ae3
|
[
"BSD-2-Clause"
] | 1
|
2020-03-19T16:02:11.000Z
|
2020-03-19T16:02:11.000Z
|
tests/test_yes_com.py
|
fabian-hk/dnssec_scanner
|
437ca39bb84e592b81f146c9e711c93687a43ae3
|
[
"BSD-2-Clause"
] | 1
|
2020-03-06T02:35:49.000Z
|
2020-03-12T16:41:43.000Z
|
tests/test_yes_com.py
|
fabian-hk/dnssec_scanner
|
437ca39bb84e592b81f146c9e711c93687a43ae3
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import dns.rdatatype
import re
from tests.utils.custom_test_case import CustomTestCase as CTC
from dnssec_scanner import DNSSECScanner, State
from dnssec_scanner.messages import Validator, Msg, Types
from tests.utils.messages_testing import TestMessage
log = logging.getLogger("dnssec_scanner")
log.setLevel(logging.WARNING)
class YesCom(CTC):
# fmt: off
LOGS = [
str(TestMessage(".", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS, CTC.SINGLE_PATTERN)),
str(TestMessage(".", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage(".", "com.", dns.rdatatype.DS, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS, CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "yes.com.", dns.rdatatype.DS, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.A, "", Msg.VALIDATED, Validator.ZSK, CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.NS, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.CAA, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.SOA, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.MX, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.TXT, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.NSEC3PARAM, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", "TYPE65534", "", Msg.VALIDATED, Validator.ZSK, CTC.SINGLE_PATTERN)),
]
WARNINGS = []
ERRORS = []
# fmt: on
def test_dnssec(self):
scanner = DNSSECScanner("yes.com")
result = scanner.run()
self.assert_list(self.LOGS, result.logs)
self.assert_list(self.WARNINGS, result.warnings)
self.assert_list(self.ERRORS, result.errors)
self.assertEqual(State.SECURE, result.state)
class YesComNonExistence(CTC):
# fmt: off
LOGS = [
str(TestMessage(".", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS, CTC.SINGLE_PATTERN)),
str(TestMessage(".", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage(".", "com.", dns.rdatatype.DS, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS, CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("com.", "yes.com.", dns.rdatatype.DS, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", Types.KSK, CTC.SINGLE_PATTERN, Msg.VALIDATED, Validator.DS,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.KSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "", dns.rdatatype.DNSKEY, CTC.MULTI_PATTERN, Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.SOA, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "[a-z0-9]{32}.yes.com.", dns.rdatatype.NSEC3, "", Msg.VALIDATED,
Validator.ZSK, CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "[a-z0-9]{32}.yes.com.", dns.rdatatype.NSEC3, "", Msg.VALIDATED,
Validator.ZSK, CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "[a-z0-9]{32}.yes.com.", dns.rdatatype.NSEC3, "", Msg.VALIDATED,
Validator.ZSK, CTC.SINGLE_PATTERN)),
str(TestMessage("yes.com.", "yes.com.", dns.rdatatype.NSEC3PARAM, "", Msg.VALIDATED, Validator.ZSK,
CTC.SINGLE_PATTERN)),
re.escape("yes.com. zone: Found closest encloser yes.com."),
re.escape("yes.com. zone: Found NSEC3 that covers the next closer name a.yes.com."),
re.escape("yes.com. zone: Found NSEC3 that covers the wildcard *.yes.com.")
]
WARNINGS = []
ERRORS = []
# fmt: on
def test_dnssec(self):
scanner = DNSSECScanner("a.yes.com")
result = scanner.run()
self.assert_list(self.LOGS, result.logs)
self.assert_list(self.WARNINGS, result.warnings)
self.assert_list(self.ERRORS, result.errors)
self.assertEqual(State.SECURE, result.state)
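A minimal standalone sketch of the scanner API these tests exercise, assuming the dnssec_scanner package from this repository is installed; every name below mirrors the test code above.
from dnssec_scanner import DNSSECScanner, State

scanner = DNSSECScanner("yes.com")   # domain whose DNSSEC chain should be validated
result = scanner.run()               # walks the chain of trust from the root zone down
print(result.state)                  # State.SECURE when every link validates
for message in result.logs:          # per-step validation messages, as asserted above
    print(message)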
| 50.567797
| 120
| 0.613038
| 689
| 5,967
| 5.217707
| 0.117562
| 0.070097
| 0.164673
| 0.153268
| 0.893463
| 0.893463
| 0.887065
| 0.875939
| 0.875939
| 0.875939
| 0
| 0.00524
| 0.232445
| 5,967
| 117
| 121
| 51
| 0.779694
| 0.00553
| 0
| 0.73913
| 0
| 0
| 0.095463
| 0.010626
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.021739
| false
| 0
| 0.076087
| 0
| 0.184783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f77aa4076cd3de22797056265439280ce6b2e2d7
| 3,375
|
py
|
Python
|
catstrace/events/disk_events.py
|
dsrhaslab/catstrace
|
03c7130f8181b05880645b9d8a8de573b3c64380
|
[
"MIT"
] | null | null | null |
catstrace/events/disk_events.py
|
dsrhaslab/catstrace
|
03c7130f8181b05880645b9d8a8de573b3c64380
|
[
"MIT"
] | null | null | null |
catstrace/events/disk_events.py
|
dsrhaslab/catstrace
|
03c7130f8181b05880645b9d8a8de573b3c64380
|
[
"MIT"
] | null | null | null |
from catstrace.events.base_event import Event, EventType
import simplejson as json
import codecs
class DiskEvent(Event):
def __init__(self, timestamp, host, pid, filename, fd):
self._fd = fd
if fd == 0: self._filename = "STDIN"
elif fd == 1: self._filename = "STDOUT"
elif fd == 2: self._filename = "STDERR"
else: self._filename = filename
super(DiskEvent, self).__init__(timestamp, host, pid)
class DiskOpenEvent(DiskEvent):
def __init__(self, timestamp, host, pid, filename, fd):
self._type = EventType.DISK_OPEN
super(DiskOpenEvent, self).__init__(timestamp, host, pid, filename, fd)
def to_string(self):
event = {
"timestamp": self._timestamp,
"type": self._type,
"thread": self._thread,
"pid": self._pid,
"data": {
"host": self._host
},
"fd": self._fd,
"filename": self._filename,
}
return event
def to_json(self):
return json.dumps(self.to_string())
class DiskWriteEvent(DiskEvent):
def __init__(self, timestamp, host, pid, filename, fd, offset, size, returned_value, msg, saveAsText):
self._type = EventType.DISK_WRITE
self._offset = offset
self._size = size
self._returned_value = returned_value
self._msg = codecs.unicode_escape_decode(msg)[0]
self._msg_len = len(self._msg)
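# When the payload must not be stored as text, keep only a MinHash signature and
# set _msg_len to -1 so to_string() emits the signature instead of the message.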
if not saveAsText:
self._signature = self.compute_minhashes(self._msg, self._msg_len)
self._msg_len = -1
super(DiskWriteEvent, self).__init__(timestamp, host, pid, filename, fd)
def to_string(self):
event = {
"timestamp": self._timestamp,
"type": self._type,
"thread": self._thread,
"pid": self._pid,
"size": self._size,
"returned_value": self._returned_value,
"data": {
"host": self._host
},
}
if (self._fd is not None): event["fd"] = self._fd
if (self._offset is not None): event["offset"] = self._offset
if (self._filename is not None): event["filename"] = self._filename
if (self._msg_len != -1):
event["data"]["msg"] = self._msg
event["data"]["msg_len"] = self._msg_len
else:
event["data"]["signature"] = self._signature
return event
def to_json(self):
return json.dumps(self.to_string())
class DiskReadEvent(DiskEvent):
def __init__(self, timestamp, host, pid, filename, fd, offset, size, returned_value, msg, saveAsText):
self._type = EventType.DISK_READ
self._offset = offset
self._size = size
self._returned_value = returned_value
self._msg = codecs.unicode_escape_decode(msg)[0]
self._msg_len = len(self._msg)
if not saveAsText:
self._signature = self.compute_minhashes(self._msg, self._msg_len)
self._msg_len = -1
super(DiskReadEvent, self).__init__(timestamp, host, pid, filename, fd)
def to_string(self):
event = {
"timestamp": self._timestamp,
"type": self._type,
"thread": self._thread,
"pid": self._pid,
"size": self._size,
"returned_value": self._returned_value,
"data": {
"host": self._host
},
}
if (self._fd is not None): event["fd"] = self._fd
if (self._offset is not None): event["offset"] = self._offset
if (self._filename is not None): event["filename"] = self._filename
if (self._msg_len != -1):
event["data"]["msg"] = self._msg
event["data"]["msg_len"] = self._msg_len
else:
event["data"]["signature"] = self._signature
return event
def to_json(self):
return json.dumps(self.to_string())
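A hedged usage sketch (not part of the original module): it assumes the Event base class accepts these constructor arguments and sets the _thread attribute that to_string() reads; saveAsText=True keeps the raw payload and skips the minhash path.
from catstrace.events.disk_events import DiskWriteEvent

event = DiskWriteEvent(
    timestamp=1700000000.0, host="node-1", pid=4242,
    filename="/tmp/out.log", fd=7, offset=0, size=12,
    returned_value=12, msg="hello world\n", saveAsText=True,
)
print(event.to_json())  # JSON with timestamp, pid, fd, filename, size, msg, msg_len, ...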
| 27.892562
| 103
| 0.674667
| 455
| 3,375
| 4.692308
| 0.134066
| 0.059016
| 0.046838
| 0.078689
| 0.822482
| 0.811241
| 0.811241
| 0.811241
| 0.811241
| 0.768618
| 0
| 0.003244
| 0.178074
| 3,375
| 121
| 104
| 27.892562
| 0.766402
| 0
| 0
| 0.73
| 0
| 0
| 0.073164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.03
| 0.03
| 0.23
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f77f885e6f7384e5d877fbe690df466633b75cb6
| 8,142
|
py
|
Python
|
miniconda3-lnx/pyzo-4.10.2/source/pyzo/util/qt/QtWidgets.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 235
|
2016-03-05T17:12:12.000Z
|
2022-03-22T06:35:45.000Z
|
miniconda3-lnx/pyzo-4.10.2/source/pyzo/util/qt/QtWidgets.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 423
|
2016-02-15T20:23:46.000Z
|
2022-03-26T16:36:37.000Z
|
miniconda3-lnx/pyzo-4.10.2/source/pyzo/util/qt/QtWidgets.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 115
|
2016-04-01T14:31:33.000Z
|
2022-03-17T10:59:45.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides widget classes and functions.
.. warning:: Only PyQt4/PySide QtGui classes compatible with PyQt5.QtWidgets
are exposed here. Therefore, treat and use this package as if it
were the ``PyQt5.QtWidgets`` module.
"""
from . import PYQT5, PYSIDE2, PYQT4, PYSIDE, PythonQtError
from ._patch.qcombobox import patch_qcombobox
from ._patch.qheaderview import introduce_renamed_methods_qheaderview
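# Normalize whichever binding is installed to the PyQt5.QtWidgets surface: for the
# Qt4-era bindings, names that live in QtGui, QtPrintSupport, or QtCore under Qt5
# are deleted below so that only widget classes remain in this namespace.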
if PYQT5:
from PyQt5.QtWidgets import *
elif PYSIDE2:
from PySide2.QtWidgets import *
elif PYQT4:
from PyQt4.QtGui import *
QStyleOptionViewItem = QStyleOptionViewItemV4
del QStyleOptionViewItemV4
# These objects belong to QtGui
try:
# Older versions of PyQt4 do not provide these
del (
QGlyphRun,
QMatrix2x2,
QMatrix2x3,
QMatrix2x4,
QMatrix3x2,
QMatrix3x3,
QMatrix3x4,
QMatrix4x2,
QMatrix4x3,
QMatrix4x4,
QQuaternion,
QRadialGradient,
QRawFont,
QRegExpValidator,
QStaticText,
QTouchEvent,
QVector2D,
QVector3D,
QVector4D,
qFuzzyCompare,
)
except NameError:
pass
del (
QAbstractTextDocumentLayout,
QActionEvent,
QBitmap,
QBrush,
QClipboard,
QCloseEvent,
QColor,
QConicalGradient,
QContextMenuEvent,
QCursor,
QDesktopServices,
QDoubleValidator,
QDrag,
QDragEnterEvent,
QDragLeaveEvent,
QDragMoveEvent,
QDropEvent,
QFileOpenEvent,
QFocusEvent,
QFont,
QFontDatabase,
QFontInfo,
QFontMetrics,
QFontMetricsF,
QGradient,
QHelpEvent,
QHideEvent,
QHoverEvent,
QIcon,
QIconDragEvent,
QIconEngine,
QImage,
QImageIOHandler,
QImageReader,
QImageWriter,
QInputEvent,
QInputMethodEvent,
QKeyEvent,
QKeySequence,
QLinearGradient,
QMouseEvent,
QMoveEvent,
QMovie,
QPaintDevice,
QPaintEngine,
QPaintEngineState,
QPaintEvent,
QPainter,
QPainterPath,
QPainterPathStroker,
QPalette,
QPen,
QPicture,
QPictureIO,
QPixmap,
QPixmapCache,
QPolygon,
QPolygonF,
QRegion,
QResizeEvent,
QSessionManager,
QShortcutEvent,
QShowEvent,
QStandardItem,
QStandardItemModel,
QStatusTipEvent,
QSyntaxHighlighter,
QTabletEvent,
QTextBlock,
QTextBlockFormat,
QTextBlockGroup,
QTextBlockUserData,
QTextCharFormat,
QTextCursor,
QTextDocument,
QTextDocumentFragment,
QTextDocumentWriter,
QTextFormat,
QTextFragment,
QTextFrame,
QTextFrameFormat,
QTextImageFormat,
QTextInlineObject,
QTextItem,
QTextLayout,
QTextLength,
QTextLine,
QTextList,
QTextListFormat,
QTextObject,
QTextObjectInterface,
QTextOption,
QTextTable,
QTextTableCell,
QTextTableCellFormat,
QTextTableFormat,
QTransform,
QValidator,
QWhatsThisClickedEvent,
QWheelEvent,
QWindowStateChangeEvent,
qAlpha,
qBlue,
qGray,
qGreen,
qIsGray,
qRed,
qRgb,
qRgba,
QIntValidator,
QStringListModel,
)
# These objects belong to QtPrintSupport
del (
QAbstractPrintDialog,
QPageSetupDialog,
QPrintDialog,
QPrintEngine,
QPrintPreviewDialog,
QPrintPreviewWidget,
QPrinter,
QPrinterInfo,
)
# These objects belong to QtCore
del (
QItemSelection,
QItemSelectionModel,
QItemSelectionRange,
QSortFilterProxyModel,
)
# Patch QComboBox to allow Python objects to be passed to userData
patch_qcombobox(QComboBox)
# QHeaderView: renamed methods
introduce_renamed_methods_qheaderview(QHeaderView)
elif PYSIDE:
from PySide.QtGui import *
QStyleOptionViewItem = QStyleOptionViewItemV4
del QStyleOptionViewItemV4
# These objects belong to QtGui
del (
QAbstractTextDocumentLayout,
QActionEvent,
QBitmap,
QBrush,
QClipboard,
QCloseEvent,
QColor,
QConicalGradient,
QContextMenuEvent,
QCursor,
QDesktopServices,
QDoubleValidator,
QDrag,
QDragEnterEvent,
QDragLeaveEvent,
QDragMoveEvent,
QDropEvent,
QFileOpenEvent,
QFocusEvent,
QFont,
QFontDatabase,
QFontInfo,
QFontMetrics,
QFontMetricsF,
QGradient,
QHelpEvent,
QHideEvent,
QHoverEvent,
QIcon,
QIconDragEvent,
QIconEngine,
QImage,
QImageIOHandler,
QImageReader,
QImageWriter,
QInputEvent,
QInputMethodEvent,
QKeyEvent,
QKeySequence,
QLinearGradient,
QMatrix2x2,
QMatrix2x3,
QMatrix2x4,
QMatrix3x2,
QMatrix3x3,
QMatrix3x4,
QMatrix4x2,
QMatrix4x3,
QMatrix4x4,
QMouseEvent,
QMoveEvent,
QMovie,
QPaintDevice,
QPaintEngine,
QPaintEngineState,
QPaintEvent,
QPainter,
QPainterPath,
QPainterPathStroker,
QPalette,
QPen,
QPicture,
QPictureIO,
QPixmap,
QPixmapCache,
QPolygon,
QPolygonF,
QQuaternion,
QRadialGradient,
QRegExpValidator,
QRegion,
QResizeEvent,
QSessionManager,
QShortcutEvent,
QShowEvent,
QStandardItem,
QStandardItemModel,
QStatusTipEvent,
QSyntaxHighlighter,
QTabletEvent,
QTextBlock,
QTextBlockFormat,
QTextBlockGroup,
QTextBlockUserData,
QTextCharFormat,
QTextCursor,
QTextDocument,
QTextDocumentFragment,
QTextFormat,
QTextFragment,
QTextFrame,
QTextFrameFormat,
QTextImageFormat,
QTextInlineObject,
QTextItem,
QTextLayout,
QTextLength,
QTextLine,
QTextList,
QTextListFormat,
QTextObject,
QTextObjectInterface,
QTextOption,
QTextTable,
QTextTableCell,
QTextTableCellFormat,
QTextTableFormat,
QTouchEvent,
QTransform,
QValidator,
QVector2D,
QVector3D,
QVector4D,
QWhatsThisClickedEvent,
QWheelEvent,
QWindowStateChangeEvent,
qAlpha,
qBlue,
qGray,
qGreen,
qIsGray,
qRed,
qRgb,
qRgba,
QIntValidator,
QStringListModel,
)
# These objects belong to QtPrintSupport
del (
QAbstractPrintDialog,
QPageSetupDialog,
QPrintDialog,
QPrintEngine,
QPrintPreviewDialog,
QPrintPreviewWidget,
QPrinter,
QPrinterInfo,
)
# These objects belong to QtCore
del (
QItemSelection,
QItemSelectionModel,
QItemSelectionRange,
QSortFilterProxyModel,
)
# Patch QComboBox to allow Python objects to be passed to userData
patch_qcombobox(QComboBox)
# QHeaderView: renamed methods
introduce_renamed_methods_qheaderview(QHeaderView)
else:
raise PythonQtError("No Qt bindings could be found")
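A brief consumer sketch, assuming pyzo is importable and at least one supported Qt binding is installed; the shim lets the same widget code run under PyQt5, PySide2, PyQt4, or PySide.
from pyzo.util.qt import QtWidgets

app = QtWidgets.QApplication([])
button = QtWidgets.QPushButton("Hello")  # identical class name under every binding
button.show()
app.exec_()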
| 22.491713
| 76
| 0.577868
| 499
| 8,142
| 9.40481
| 0.47495
| 0.017899
| 0.023013
| 0.02557
| 0.800767
| 0.800767
| 0.800767
| 0.800767
| 0.800767
| 0.762412
| 0
| 0.014132
| 0.374232
| 8,142
| 361
| 77
| 22.554017
| 0.906575
| 0.103906
| 0
| 0.912226
| 0
| 0
| 0.003988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003135
| 0.021944
| 0
| 0.021944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3ea7e8c6f7f21d1d3940611578470c7182b31b1
| 93
|
py
|
Python
|
lib/python2.7/site-packages/networkx/algorithms/traversal/__init__.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 15
|
2018-04-26T08:17:18.000Z
|
2021-03-05T08:44:13.000Z
|
lib/python2.7/site-packages/networkx/algorithms/traversal/__init__.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | null | null | null |
lib/python2.7/site-packages/networkx/algorithms/traversal/__init__.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 6
|
2018-04-12T15:49:27.000Z
|
2022-01-27T12:34:50.000Z
|
from .depth_first_search import *
from .breadth_first_search import *
from .edgedfs import *
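The package simply re-exports the traversal helpers; a quick illustration with the public networkx API (dfs_edges, bfs_edges, and edge_dfs all come from the re-exported modules):
import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (1, 4)])
print(list(nx.dfs_edges(G, source=1)))  # depth-first edges from node 1
print(list(nx.bfs_edges(G, source=1)))  # breadth-first edges from node 1
print(list(nx.edge_dfs(G, source=1)))   # edge-based DFS from .edgedfs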
| 23.25
| 35
| 0.806452
| 13
| 93
| 5.461538
| 0.538462
| 0.309859
| 0.478873
| 0.591549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 93
| 3
| 36
| 31
| 0.876543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e3f72522f80cc8c22f9811f3ea5ccf242a290932
| 8,568
|
py
|
Python
|
heat/core/tests/test_relational.py
|
Mystic-Slice/heat
|
dd1b83d6d8b36cb4a70eefc631f00277b0745fee
|
[
"MIT"
] | null | null | null |
heat/core/tests/test_relational.py
|
Mystic-Slice/heat
|
dd1b83d6d8b36cb4a70eefc631f00277b0745fee
|
[
"MIT"
] | null | null | null |
heat/core/tests/test_relational.py
|
Mystic-Slice/heat
|
dd1b83d6d8b36cb4a70eefc631f00277b0745fee
|
[
"MIT"
] | null | null | null |
import heat as ht
from .test_suites.basic_test import TestCase
class TestRelational(TestCase):
@classmethod
def setUpClass(cls):
super(TestRelational, cls).setUpClass()
cls.a_scalar = 2.0
cls.an_int_scalar = 2
cls.a_vector = ht.float32([2, 2])
cls.another_vector = ht.float32([2, 2, 2])
cls.a_tensor = ht.array([[1.0, 2.0], [3.0, 4.0]])
cls.another_tensor = ht.array([[2.0, 2.0], [2.0, 2.0]])
cls.a_split_tensor = cls.another_tensor.copy().resplit_(0)
cls.split_ones_tensor = ht.ones((2, 2), split=1)
cls.erroneous_type = (2, 2)
def test_eq(self):
result = ht.array([[False, True], [False, False]])
self.assertTrue(ht.equal(ht.eq(self.a_scalar, self.a_scalar), ht.array(True)))
self.assertTrue(ht.equal(ht.eq(self.a_tensor, self.a_scalar), result))
self.assertTrue(ht.equal(ht.eq(self.a_scalar, self.a_tensor), result))
self.assertTrue(ht.equal(ht.eq(self.a_tensor, self.another_tensor), result))
self.assertTrue(ht.equal(ht.eq(self.a_tensor, self.a_vector), result))
self.assertTrue(ht.equal(ht.eq(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.eq(self.a_split_tensor, self.a_tensor), result))
self.assertEqual(ht.eq(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.eq(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.eq(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.eq("self.a_tensor", "s")
def test_equal(self):
self.assertTrue(ht.equal(self.a_tensor, self.a_tensor))
self.assertFalse(ht.equal(self.a_tensor[1:], self.a_tensor))
self.assertFalse(ht.equal(self.a_split_tensor[1:], self.a_tensor[1:]))
self.assertFalse(ht.equal(self.a_tensor[1:], self.a_split_tensor[1:]))
self.assertFalse(ht.equal(self.a_tensor, self.another_tensor))
self.assertFalse(ht.equal(self.a_tensor, self.a_scalar))
self.assertFalse(ht.equal(self.a_scalar, self.a_tensor))
self.assertFalse(ht.equal(self.a_scalar, self.a_tensor[0, 0]))
self.assertFalse(ht.equal(self.a_tensor[0, 0], self.a_scalar))
self.assertFalse(ht.equal(self.another_tensor, self.a_scalar))
self.assertTrue(ht.equal(self.split_ones_tensor[:, 0], self.split_ones_tensor[:, 1]))
self.assertTrue(ht.equal(self.split_ones_tensor[:, 1], self.split_ones_tensor[:, 0]))
self.assertFalse(ht.equal(self.a_tensor, self.a_split_tensor))
self.assertFalse(ht.equal(self.a_split_tensor, self.a_tensor))
arr = ht.array([[1, 2], [3, 4]], comm=ht.MPI_SELF)
with self.assertRaises(NotImplementedError):
ht.equal(self.a_tensor, arr)
with self.assertRaises(ValueError):
ht.equal(self.a_split_tensor, self.a_split_tensor.resplit(1))
def test_ge(self):
result = ht.uint8([[False, True], [True, True]])
commutated_result = ht.array([[True, True], [False, False]])
self.assertTrue(ht.equal(ht.ge(self.a_scalar, self.a_scalar), ht.array(True)))
self.assertTrue(ht.equal(ht.ge(self.a_tensor, self.a_scalar), result))
self.assertTrue(ht.equal(ht.ge(self.a_scalar, self.a_tensor), commutated_result))
self.assertTrue(ht.equal(ht.ge(self.a_tensor, self.another_tensor), result))
self.assertTrue(ht.equal(ht.ge(self.a_tensor, self.a_vector), result))
self.assertTrue(ht.equal(ht.ge(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.ge(self.a_split_tensor, self.a_tensor), commutated_result))
self.assertEqual(ht.ge(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.ge(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.ge(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.ge("self.a_tensor", "s")
def test_gt(self):
result = ht.array([[False, False], [True, True]])
commutated_result = ht.array([[True, False], [False, False]])
self.assertTrue(ht.equal(ht.gt(self.a_scalar, self.a_scalar), ht.array(False)))
self.assertTrue(ht.equal(ht.gt(self.a_tensor, self.a_scalar), result))
self.assertTrue(ht.equal(ht.gt(self.a_scalar, self.a_tensor), commutated_result))
self.assertTrue(ht.equal(ht.gt(self.a_tensor, self.another_tensor), result))
self.assertTrue(ht.equal(ht.gt(self.a_tensor, self.a_vector), result))
self.assertTrue(ht.equal(ht.gt(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.gt(self.a_split_tensor, self.a_tensor), commutated_result))
self.assertEqual(ht.gt(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.gt(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.gt(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.gt("self.a_tensor", "s")
def test_le(self):
result = ht.array([[True, True], [False, False]])
commutated_result = ht.array([[False, True], [True, True]])
self.assertTrue(ht.equal(ht.le(self.a_scalar, self.a_scalar), ht.array(True)))
self.assertTrue(ht.equal(ht.le(self.a_tensor, self.a_scalar), result))
self.assertTrue(ht.equal(ht.le(self.a_scalar, self.a_tensor), commutated_result))
self.assertTrue(ht.equal(ht.le(self.a_tensor, self.another_tensor), result))
self.assertTrue(ht.equal(ht.le(self.a_tensor, self.a_vector), result))
self.assertTrue(ht.equal(ht.le(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.le(self.a_split_tensor, self.a_tensor), commutated_result))
self.assertEqual(ht.le(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.le(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.le(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.le("self.a_tensor", "s")
def test_lt(self):
result = ht.array([[True, False], [False, False]])
commutated_result = ht.array([[False, False], [True, True]])
self.assertTrue(ht.equal(ht.lt(self.a_scalar, self.a_scalar), ht.array(False)))
self.assertTrue(ht.equal(ht.lt(self.a_tensor, self.a_scalar), result))
self.assertTrue(ht.equal(ht.lt(self.a_scalar, self.a_tensor), commutated_result))
self.assertTrue(ht.equal(ht.lt(self.a_tensor, self.another_tensor), result))
self.assertTrue(ht.equal(ht.lt(self.a_tensor, self.a_vector), result))
self.assertTrue(ht.equal(ht.lt(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.lt(self.a_split_tensor, self.a_tensor), commutated_result))
self.assertEqual(ht.lt(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.lt(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.lt(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.lt("self.a_tensor", "s")
def test_ne(self):
result = ht.array([[True, False], [True, True]])
# self.assertTrue(ht.equal(ht.ne(self.a_scalar, self.a_scalar), ht.array([False])))
# self.assertTrue(ht.equal(ht.ne(self.a_tensor, self.a_scalar), result))
# self.assertTrue(ht.equal(ht.ne(self.a_scalar, self.a_tensor), result))
# self.assertTrue(ht.equal(ht.ne(self.a_tensor, self.another_tensor), result))
# self.assertTrue(ht.equal(ht.ne(self.a_tensor, self.a_vector), result))
# self.assertTrue(ht.equal(ht.ne(self.a_tensor, self.an_int_scalar), result))
self.assertTrue(ht.equal(ht.ne(self.a_split_tensor, self.a_tensor), result))
self.assertTrue(ht.equal(self.a_split_tensor != self.a_tensor, result))
self.assertEqual(ht.ne(self.a_split_tensor, self.a_tensor).dtype, ht.bool)
with self.assertRaises(ValueError):
ht.ne(self.a_tensor, self.another_vector)
with self.assertRaises(TypeError):
ht.ne(self.a_tensor, self.erroneous_type)
with self.assertRaises(TypeError):
ht.ne("self.a_tensor", "s")
| 51
| 95
| 0.668067
| 1,276
| 8,568
| 4.310345
| 0.049373
| 0.117273
| 0.15
| 0.175636
| 0.915091
| 0.887818
| 0.860909
| 0.814364
| 0.767091
| 0.697455
| 0
| 0.007543
| 0.179972
| 8,568
| 167
| 96
| 51.305389
| 0.775263
| 0.052171
| 0
| 0.146154
| 0
| 0
| 0.010352
| 0
| 0
| 0
| 0
| 0
| 0.592308
| 1
| 0.061538
| false
| 0
| 0.015385
| 0
| 0.084615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
542076448ebe080a7744aa980d56f37d188c54f5
| 34,203
|
py
|
Python
|
paida-3.2.1_2.10.1/paida/paida_core/IDataPointSetFactory.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
paida-3.2.1_2.10.1/paida/paida_core/IDataPointSetFactory.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
paida-3.2.1_2.10.1/paida/paida_core/IDataPointSetFactory.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
from paida.paida_core.PAbsorber import *
from paida.paida_core.IAnnotation import *
from paida.paida_core.IDataPointSet import *
from paida.paida_core.PExceptions import *
from paida.paida_core.PUtilities import *
from math import sqrt
import types
import os
class IDataPointSetFactory:
def __init__(self, tree):
self._tree = tree
def create(self, data1, data2, data3 = None):
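# Emulated overloading: dispatch on the runtime types of data1..data3. The
# types.StringTypes checks mark this as Python 2 era code.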
if (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and (type(data3) == types.IntType):
name = data1
title = data2
dimension = data3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (type(data2) == types.IntType) and (data3 == None):
name = data1
title = data1
dimension = data2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram1D') and (data3 == None):
name = data1
histogram = data2
title = histogram.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
axisX = histogram.axis()
for indexX in range(axisX.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binHeight = histogram.binHeight(indexX)
binError = histogram.binError(indexX)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(binError)
measurementY.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram1D') and (type(data3) in types.StringTypes):
name = data1
histogram = data2
options = optionAnalyzer(data3)
title = histogram.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
axisX = histogram.axis()
for indexX in range(axisX.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binHeight = histogram.binHeight(indexX)
binError = histogram.binError(indexX)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(binError)
measurementY.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram2D') and (data3 == None):
name = data1
histogram = data2
title = histogram.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binHeight = histogram.binHeight(indexX, indexY)
binError = histogram.binError(indexX, indexY)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(binError)
measurementZ.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram2D') and (type(data3) in types.StringTypes):
name = data1
histogram = data2
options = optionAnalyzer(data3)
title = histogram.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binHeight = histogram.binHeight(indexX, indexY)
binError = histogram.binError(indexX, indexY)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(binError)
measurementZ.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram3D') and (data3 == None):
name = data1
histogram = data2
title = histogram.title()
dimension = 4
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
axisZ = histogram.zAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
for indexZ in range(axisZ.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binCenterZ = (axisZ.binLowerEdge(indexZ) + axisZ.binUpperEdge(indexZ)) / 2.0
binHeight = histogram.binHeight(indexX, indexY, indexZ)
binError = histogram.binError(indexX, indexY, indexZ)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binCenterZ)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
measurementV = dataPoint.coordinate(3)
measurementV.setValue(binHeight)
measurementV.setErrorPlus(binError)
measurementV.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IHistogram3D') and (type(data3) in types.StringTypes):
name = data1
histogram = data2
options = optionAnalyzer(data3)
title = histogram.title()
dimension = 4
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
axisZ = histogram.zAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
for indexZ in range(axisZ.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binCenterZ = (axisZ.binLowerEdge(indexZ) + axisZ.binUpperEdge(indexZ)) / 2.0
binHeight = histogram.binHeight(indexX, indexY, indexZ)
binError = histogram.binError(indexX, indexY, indexZ)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binCenterZ)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
measurementV = dataPoint.coordinate(3)
measurementV.setValue(binHeight)
measurementV.setErrorPlus(binError)
measurementV.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud1D') and (data3 == None):
name = data1
cloud = data2
title = cloud.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for index in range(cloud.entries()):
binCenterX = cloud.value(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud1D') and (type(data3) in types.StringTypes):
name = data1
cloud = data2
options = optionAnalyzer(data3)
title = cloud.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
for index in range(cloud.entries()):
binCenterX = cloud.value(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud2D') and (data3 == None):
name = data1
cloud = data2
title = cloud.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for index in range(cloud.entries()):
binCenterX = cloud.valueX(index)
binCenterY = cloud.valueY(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud2D') and (type(data3) in types.StringTypes):
name = data1
cloud = data2
options = optionAnalyzer(data3)
title = cloud.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
for index in range(cloud.entries()):
binCenterX = cloud.valueX(index)
binCenterY = cloud.valueY(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud3D') and (data3 == None):
name = data1
cloud = data2
title = cloud.title()
dimension = 4
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for index in range(cloud.entries()):
binCenterX = cloud.valueX(index)
binCenterY = cloud.valueY(index)
binCenterZ = cloud.valueZ(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binCenterZ)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
measurementV = dataPoint.coordinate(3)
measurementV.setValue(binHeight)
measurementV.setErrorPlus(0.0)
measurementV.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'ICloud3D') and (type(data3) in types.StringTypes):
name = data1
cloud = data2
options = optionAnalyzer(data3)
title = cloud.title()
dimension = 4
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
for index in range(cloud.entries()):
binCenterX = cloud.valueX(index)
binCenterY = cloud.valueY(index)
binCenterZ = cloud.valueZ(index)
binHeight = cloud.weight(index)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binCenterZ)
measurementZ.setErrorPlus(0.0)
measurementZ.setErrorMinus(0.0)
measurementV = dataPoint.coordinate(3)
measurementV.setValue(binHeight)
measurementV.setErrorPlus(0.0)
measurementV.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IProfile1D') and (data3 == None):
name = data1
histogram = data2
title = histogram.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
axisX = histogram.axis()
for indexX in range(axisX.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binHeight = histogram.binHeight(indexX)
binError = histogram.binError(indexX)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(binError)
measurementY.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IProfile1D') and (type(data3) in types.StringTypes):
name = data1
histogram = data2
options = optionAnalyzer(data3)
title = histogram.title()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
axisX = histogram.axis()
for indexX in range(axisX.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binHeight = histogram.binHeight(indexX)
binError = histogram.binError(indexX)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binHeight)
measurementY.setErrorPlus(binError)
measurementY.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IProfile2D') and (data3 == None):
name = data1
histogram = data2
title = histogram.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binHeight = histogram.binHeight(indexX, indexY)
binError = histogram.binError(indexX, indexY)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(binError)
measurementZ.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
elif (type(data1) in types.StringTypes) and (data2.__class__.__name__ == 'IProfile2D') and (type(data3) in types.StringTypes):
name = data1
histogram = data2
options = optionAnalyzer(data3)
title = histogram.title()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension, options)
axisX = histogram.xAxis()
axisY = histogram.yAxis()
for indexX in range(axisX.bins()):
for indexY in range(axisY.bins()):
binCenterX = (axisX.binLowerEdge(indexX) + axisX.binUpperEdge(indexX)) / 2.0
binCenterY = (axisY.binLowerEdge(indexY) + axisY.binUpperEdge(indexY)) / 2.0
binHeight = histogram.binHeight(indexX, indexY)
binError = histogram.binError(indexX, indexY)
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(binCenterX)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(binCenterY)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(binHeight)
measurementZ.setErrorPlus(binError)
measurementZ.setErrorMinus(binError)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
else:
raise IllegalArgumentException()
def createX(self, data1, data2, data3, data4 = None, data5 = None):
if (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and (data4 == None) and (data5 == None):
name = data1
title = data1
x = data2
xep = data3
xem = data3
elif (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and (data5 == None):
name = data1
title = data1
x = data2
xep = data3
xem = data4
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and (data5 == None):
name = data1
title = data2
x = data3
xep = data4
xem = data4
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__'):
name = data1
title = data2
x = data3
xep = data4
xem = data5
else:
raise IllegalArgumentException()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for indexX in range(len(x)):
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(x[indexX])
measurementX.setErrorPlus(xep[indexX])
measurementX.setErrorMinus(xem[indexX])
measurementY = dataPoint.coordinate(1)
measurementY.setValue(indexX)
measurementY.setErrorPlus(0.0)
measurementY.setErrorMinus(0.0)
self._tree._mkObject(name, dataPointSet)
return dataPointSet
def createY(self, data1, data2, data3, data4 = None, data5 = None):
if (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and (data4 == None) and (data5 == None):
name = data1
title = data1
y = data2
yep = data3
yem = data3
elif (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and (data5 == None):
name = data1
title = data1
y = data2
yep = data3
yem = data4
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and (data5 == None):
name = data1
title = data2
y = data3
yep = data4
yem = data4
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__'):
name = data1
title = data2
y = data3
yep = data4
yem = data5
else:
raise IllegalArgumentException()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for indexY in range(len(y)):
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(indexY)
measurementX.setErrorPlus(0.0)
measurementX.setErrorMinus(0.0)
measurementY = dataPoint.coordinate(1)
measurementY.setValue(y[indexY])
measurementY.setErrorPlus(yep[indexY])
measurementY.setErrorMinus(yem[indexY])
self._tree._mkObject(name, dataPointSet)
return dataPointSet
def createXY(self, data1, data2, data3, data4, data5, data6 = None, data7 = None, data8 = None):
if (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and (data6 == None) and (data7 == None) and (data8 == None):
name = data1
title = data1
x = data2
y = data3
xep = data4
yep = data5
xem = data4
yem = data5
elif (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and (data8 == None):
name = data1
title = data1
x = data2
y = data3
xep = data4
yep = data5
xem = data6
yem = data7
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and (data7 == None) and (data8 == None):
name = data1
title = data2
x = data3
y = data4
xep = data5
yep = data6
xem = data5
yem = data6
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and hasattr(data8, '__iter__'):
name = data1
title = data2
x = data3
y = data4
xep = data5
yep = data6
xem = data7
yem = data8
else:
raise IllegalArgumentException()
dimension = 2
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for indexX in range(len(x)):
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(x[indexX])
measurementX.setErrorPlus(xep[indexX])
measurementX.setErrorMinus(xem[indexX])
measurementY = dataPoint.coordinate(1)
measurementY.setValue(y[indexX])
measurementY.setErrorPlus(yep[indexX])
measurementY.setErrorMinus(yem[indexX])
self._tree._mkObject(name, dataPointSet)
return dataPointSet
def createXYZ(self, data1, data2, data3, data4, data5, data6, data7, data8 = None, data9 = None, data10 = None, data11 = None):
if (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and (data8 == None) and (data9 == None) and (data10 == None) and (data11 == None):
name = data1
title = data1
x = data2
y = data3
z = data4
xep = data5
yep = data6
zep = data7
xem = data5
yem = data6
zem = data7
elif (type(data1) in types.StringTypes) and hasattr(data2, '__iter__') and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and hasattr(data8, '__iter__') and hasattr(data9, '__iter__') and hasattr(data10, '__iter__') and (data11 == None):
name = data1
title = data1
x = data2
y = data3
z = data4
xep = data5
yep = data6
zep = data7
xem = data8
yem = data9
zem = data10
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and hasattr(data8, '__iter__') and (data9 == None) and (data10 == None) and (data11 == None):
name = data1
title = data2
x = data3
y = data4
z = data5
xep = data6
yep = data7
zep = data8
xem = data6
yem = data7
zem = data8
elif (type(data1) in types.StringTypes) and (type(data2) in types.StringTypes) and hasattr(data3, '__iter__') and hasattr(data4, '__iter__') and hasattr(data5, '__iter__') and hasattr(data6, '__iter__') and hasattr(data7, '__iter__') and hasattr(data8, '__iter__') and hasattr(data9, '__iter__') and hasattr(data10, '__iter__') and hasattr(data11, '__iter__'):
name = data1
title = data2
x = data3
y = data4
z = data5
xep = data6
yep = data7
zep = data8
xem = data9
yem = data10
zem = data11
else:
raise IllegalArgumentException()
dimension = 3
dataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for indexX in range(len(x)):
dataPoint = dataPointSet.addPoint()
measurementX = dataPoint.coordinate(0)
measurementX.setValue(x[indexX])
measurementX.setErrorPlus(xep[indexX])
measurementX.setErrorMinus(xem[indexX])
measurementY = dataPoint.coordinate(1)
measurementY.setValue(y[indexX])
measurementY.setErrorPlus(yep[indexX])
measurementY.setErrorMinus(yem[indexX])
measurementZ = dataPoint.coordinate(2)
measurementZ.setValue(z[indexX])
measurementZ.setErrorPlus(zep[indexX])
measurementZ.setErrorMinus(zem[indexX])
self._tree._mkObject(name, dataPointSet)
return dataPointSet
def _createCopy(self, name, dataPointSet):
title = dataPointSet.title()
dimension = dataPointSet.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet.size()):
newDataPointSet.addPoint(dataPointSet.point(offset))
return newDataPointSet
def createCopy(self, name, dataPointSet):
newDataPointSet = self._createCopy(os.path.basename(name), dataPointSet)
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
def destroy(self, dataPointSet):
self._tree._rmObject(dataPointSet)
def add(self, name, dataPointSet1, dataPointSet2):
if dataPointSet1.size() != dataPointSet2.size():
raise IllegalArgumentException()
if dataPointSet1.dimension() != dataPointSet2.dimension():
raise IllegalArgumentException()
title = '%s + %s' % (dataPointSet1.title(), dataPointSet2.title())
dimension = dataPointSet1.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet1.size()):
dataPoint = newDataPointSet.addPoint()
dataPoint1 = dataPointSet1.point(offset)
dataPoint2 = dataPointSet2.point(offset)
for index in range(dimension):
measurement = dataPoint.coordinate(index)
measurement1 = dataPoint1.coordinate(index)
measurement2 = dataPoint2.coordinate(index)
measurement.setValue(measurement1.value() + measurement2.value())
measurement.setErrorPlus(sqrt(measurement1.errorPlus()**2 + measurement2.errorPlus()**2))
measurement.setErrorMinus(sqrt(measurement1.errorMinus()**2 + measurement2.errorMinus()**2))
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
def subtract(self, name, dataPointSet1, dataPointSet2):
if dataPointSet1.size() != dataPointSet2.size():
raise IllegalArgumentException()
if dataPointSet1.dimension() != dataPointSet2.dimension():
raise IllegalArgumentException()
title = '%s - %s' % (dataPointSet1.title(), dataPointSet2.title())
dimension = dataPointSet1.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet1.size()):
dataPoint = newDataPointSet.addPoint()
dataPoint1 = dataPointSet1.point(offset)
dataPoint2 = dataPointSet2.point(offset)
for index in range(dimension):
measurement = dataPoint.coordinate(index)
measurement1 = dataPoint1.coordinate(index)
measurement2 = dataPoint2.coordinate(index)
measurement.setValue(measurement1.value() - measurement2.value())
measurement.setErrorPlus(sqrt(measurement1.errorPlus()**2 + measurement2.errorPlus()**2))
measurement.setErrorMinus(sqrt(measurement1.errorMinus()**2 + measurement2.errorMinus()**2))
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
def multiply(self, name, dataPointSet1, dataPointSet2):
if dataPointSet1.size() != dataPointSet2.size():
raise IllegalArgumentException()
if dataPointSet1.dimension() != dataPointSet2.dimension():
raise IllegalArgumentException()
title = '%s * %s' % (dataPointSet1.title(), dataPointSet2.title())
dimension = dataPointSet1.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet1.size()):
dataPoint = newDataPointSet.addPoint()
dataPoint1 = dataPointSet1.point(offset)
dataPoint2 = dataPointSet2.point(offset)
for index in range(dimension):
measurement = dataPoint.coordinate(index)
measurement1 = dataPoint1.coordinate(index)
measurement2 = dataPoint2.coordinate(index)
measurement.setValue(measurement1.value() * measurement2.value())
measurement.setErrorPlus(sqrt((measurement1.errorPlus() * measurement2.value())**2 + (measurement2.errorPlus() * measurement1.value())**2))
measurement.setErrorMinus(sqrt((measurement1.errorMinus() * measurement2.value())**2 + (measurement2.errorMinus() * measurement1.value())**2))
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
def divide(self, name, dataPointSet1, dataPointSet2):
if dataPointSet1.size() != dataPointSet2.size():
raise IllegalArgumentException()
if dataPointSet1.dimension() != dataPointSet2.dimension():
raise IllegalArgumentException()
title = '%s / %s' % (dataPointSet1.title(), dataPointSet2.title())
dimension = dataPointSet1.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet1.size()):
dataPoint = newDataPointSet.addPoint()
dataPoint1 = dataPointSet1.point(offset)
dataPoint2 = dataPointSet2.point(offset)
for index in range(dimension):
measurement = dataPoint.coordinate(index)
measurement1 = dataPoint1.coordinate(index)
measurement2 = dataPoint2.coordinate(index)
measurement.setValue(measurement1.value() / measurement2.value())
measurement.setErrorPlus(sqrt((measurement1.errorPlus() * measurement2.value())**2 + (measurement2.errorPlus() * measurement1.value())**2) / measurement2.value()**2)
measurement.setErrorMinus(sqrt((measurement1.errorMinus() * measurement2.value())**2 + (measurement2.errorMinus() * measurement1.value())**2) / measurement2.value()**2)
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
def weightedMean(self, name, dataPointSet1, dataPointSet2):
if dataPointSet1.size() != dataPointSet2.size():
raise IllegalArgumentException()
if dataPointSet1.dimension() != dataPointSet2.dimension():
raise IllegalArgumentException()
title = 'weighted mean of %s and %s' % (dataPointSet1.title(), dataPointSet2.title())
dimension = dataPointSet1.dimension()
newDataPointSet = IDataPointSet(os.path.basename(name), title, dimension)
for offset in range(dataPointSet1.size()):
dataPoint = newDataPointSet.addPoint()
dataPoint1 = dataPointSet1.point(offset)
dataPoint2 = dataPointSet2.point(offset)
if dataPoint1.errorPlus() != dataPoint1.errorMinus():
raise IllegalArgumentException('There are asymmetric errors.')
else:
error1 = dataPoint1.errorPlus()**2
if dataPoint2.errorPlus() != dataPoint2.errorMinus():
raise IllegalArgumentException('There are asymmetric errors.')
else:
error2 = dataPoint2.errorPlus()**2
for index in range(dimension):
measurement = dataPoint.coordinate(index)
measurement1 = dataPoint1.coordinate(index)
measurement2 = dataPoint2.coordinate(index)
measurement.setValue((measurement1.value() * error2 + measurement2.value() * error1) / (error1 + error2))
measurement.setErrorPlus(sqrt((error1 * error2) / (error1 + error2)))
measurement.setErrorMinus(measurement.errorPlus())
self._tree._mkObject(name, newDataPointSet)
return newDataPointSet
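A hedged driver sketch (Python 2, matching the types.StringTypes checks above). The stub tree is an assumption for illustration; real code obtains the factory from a PAIDA tree, which is what supplies _mkObject.
class _StubTree:
    def _mkObject(self, name, obj):  # the only tree hook createXY needs
        print("stored " + name)

factory = IDataPointSetFactory(_StubTree())
dps = factory.createXY("fit/points", [1.0, 2.0], [10.0, 40.0], [0.1, 0.1], [3.0, 6.0])
print("dimension " + str(dps.dimension()) + ", points " + str(dps.size()))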
| 41.761905
| 362
| 0.723913
| 3,702
| 34,203
| 6.559157
| 0.040789
| 0.00626
| 0.031134
| 0.037188
| 0.946751
| 0.938761
| 0.935631
| 0.930483
| 0.91957
| 0.907174
| 0
| 0.028489
| 0.160512
| 34,203
| 818
| 363
| 41.812958
| 0.817191
| 0
| 0
| 0.875
| 0
| 0
| 0.024268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017677
| false
| 0
| 0.010101
| 0
| 0.065657
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
586f5ad42fd3193fe09a1947f1b1f44f38223c0c
| 11,658
|
py
|
Python
|
services/audit/tests/integ/test_audit.py
|
honzajavorek/oci-cli
|
6ea058afba323c6b3b70e98212ffaebb0d31985e
|
[
"Apache-2.0"
] | null | null | null |
services/audit/tests/integ/test_audit.py
|
honzajavorek/oci-cli
|
6ea058afba323c6b3b70e98212ffaebb0d31985e
|
[
"Apache-2.0"
] | null | null | null |
services/audit/tests/integ/test_audit.py
|
honzajavorek/oci-cli
|
6ea058afba323c6b3b70e98212ffaebb0d31985e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import datetime
import unittest
import json
import pytz
from dateutil.parser import parse
from tests import test_config_container
from tests import util
CASSETTE_LIBRARY_DIR = 'services/audit/tests/cassettes'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class TestAudit(unittest.TestCase):
# For recording, don't match on the query string because that includes the date range for the query
# (and that will change between runs)
@test_config_container.RecordReplay('audit', cassette_library_dir=CASSETTE_LIBRARY_DIR, match_on=['method', 'scheme', 'host', 'port', 'vcr_path_matcher'])
def test_all_operations(self):
"""Successfully calls every operation with basic options."""
self.subtest_event_list()
# Not present in the preview spec
# self.subtest_config_get()
def subtest_config_get(self):
util.set_admin_pass_phrase()
result = util.invoke_command_as_admin(['audit', 'config', 'get', '--compartment-id', util.TENANT_ID])
util.unset_admin_pass_phrase()
util.validate_response(result)
response = json.loads(result.output)
assert response["data"]["retention-period-days"] is not None
def subtest_event_list(self):
end_time = datetime.datetime.utcnow()
start_time = end_time + datetime.timedelta(days=-1)
result = self.invoke(['audit', 'event', 'list', '--compartment-id', util.COMPARTMENT_ID, '--start-time', start_time.strftime(DATETIME_FORMAT), '--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
if result.output:
response = json.loads(result.output)
            # Allow some jitter: audit requires an RFC3339 date but only works with minute precision
end_time_with_zone = pytz.utc.localize(end_time) + datetime.timedelta(minutes=5)
start_time_with_zone = pytz.utc.localize(start_time) + datetime.timedelta(minutes=-5)
for event in response["data"]:
if not test_config_container.using_vcr_with_mock_responses():
parsed_date = parse(event["event-time"])
assert parsed_date >= start_time_with_zone
assert parsed_date <= end_time_with_zone
@test_config_container.RecordReplay('audit', cassette_library_dir=CASSETTE_LIBRARY_DIR)
def test_event_list(self):
start_time = datetime.datetime(2018, 9, 27)
end_time = start_time + datetime.timedelta(hours=2)
# This is the original, default version of the command.
        # Subsequent commands with additional parameters should produce essentially the same output.
result = self.invoke(
['audit', 'event', 'list',
'--all',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
response = json.loads(result.output)
events = response["data"]
event_count = len(events)
self.assertGreater(len(events), 0)
for event in events:
parsed_date = parse(event["event-time"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["credential-id"]
# This should match the original with no changes.
result = self.invoke(
['audit', 'event', 'list',
'--all', '--stream-output',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
response = json.loads(result.output)
events = response["data"]
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["event-time"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["credential-id"]
# This should have the same count as the original but with camelCaseNames instead of dash-names.
result = self.invoke(
['audit', 'event', 'list',
'--all', '--skip-deserialization',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
response = json.loads(result.output)
events = response["data"]
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["eventTime"])
event_name = event["eventName"]
event_source = event["eventSource"]
credential_id = event["credentialId"]
# This should have the same count as the original but with camelCaseNames instead of dash-names.
result = self.invoke(
['audit', 'event', 'list',
'--all', '--skip-deserialization', '--stream-output',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
response = json.loads(result.output)
events = response["data"]
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["eventTime"])
event_name = event["eventName"]
event_source = event["eventSource"]
credential_id = event["credentialId"]
# This should have camelCaseNames instead of dash-names.
result = self.invoke(
['audit', 'event', 'list',
'--skip-deserialization',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert result.exit_code == 0
response = json.loads(result.output)
events = response["data"]
for event in events:
parsed_date = parse(event["eventTime"])
event_name = event["eventName"]
event_source = event["eventSource"]
credential_id = event["credentialId"]
        # These next two invocations should fail validation: --stream-output requires --all.
result = self.invoke(
['audit', 'event', 'list',
'--stream-output',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert 'requires --all' in result.output
result = self.invoke(
['audit', 'event', 'list',
'--skip-deserialization', '--stream-output',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT)])
assert 'requires --all' in result.output
@test_config_container.RecordReplay('audit', cassette_library_dir=CASSETTE_LIBRARY_DIR)
def test_event_list_query(self):
start_time = datetime.datetime(2018, 9, 27)
end_time = start_time + datetime.timedelta(hours=2)
# This is the original, default version of the command.
        # Subsequent queries with additional parameters should produce essentially the same output.
result = self.invoke(
['audit', 'event', 'list',
'--all',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT),
'--query',
"data[?contains(\"event-name\",'DeleteBucket')].{\"event-name\":\"event-name\",\"event-source\":\"event-source\",LoginDate:\"event-time\",\"user\":\"credential-id\"}"])
assert result.exit_code == 0
events = json.loads(result.output)
event_count = len(events)
self.assertGreater(len(events), 0)
for event in events:
parsed_date = parse(event["LoginDate"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["user"]
result = self.invoke(
['audit', 'event', 'list',
'--all',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT),
'--stream-output',
'--query',
"data[?contains(\"event-name\",'DeleteBucket')].{\"event-name\":\"event-name\",\"event-source\":\"event-source\",LoginDate:\"event-time\",\"user\":\"credential-id\"}"])
assert result.exit_code == 0
events = json.loads(result.output)
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["LoginDate"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["user"]
# The query fields need to be camelCaseNames instead of dash-names.
result = self.invoke(
['audit', 'event', 'list',
'--all',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT),
'--skip-deserialization',
'--query',
"data[?contains(\"eventName\",'DeleteBucket')].{\"event-name\":\"eventName\",\"event-source\":\"eventSource\",LoginDate:\"eventTime\",\"user\":\"credentialId\"}"])
assert result.exit_code == 0
events = json.loads(result.output)
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["LoginDate"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["user"]
# The query fields need to be camelCaseNames instead of dash-names.
result = self.invoke(
['audit', 'event', 'list',
'--all',
'--compartment-id', util.COMPARTMENT_ID,
'--start-time', start_time.strftime(DATETIME_FORMAT),
'--end-time', end_time.strftime(DATETIME_FORMAT),
'--stream-output', '--skip-deserialization',
'--query',
"data[?contains(\"eventName\",'DeleteBucket')].{\"event-name\":\"eventName\",\"event-source\":\"eventSource\",LoginDate:\"eventTime\",\"user\":\"credentialId\"}"])
assert result.exit_code == 0
events = json.loads(result.output)
        self.assertEqual(len(events), event_count)
for event in events:
parsed_date = parse(event["LoginDate"])
event_name = event["event-name"]
event_source = event["event-source"]
credential_id = event["user"]
    def invoke(self, commands, debug=False, **kwargs):
        if debug:
            commands = ['--debug'] + commands
        return util.invoke_command(commands, **kwargs)
if __name__ == '__main__':
unittest.main()
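# A hypothetical helper (not part of the oci-cli codebase) illustrating the
# key convention the assertions above exercise: deserialized output uses
# dash-names ("event-time") while --skip-deserialization returns the raw
# camelCase keys ("eventTime").
import re

def camel_to_dash(name):
    # Insert a dash before each interior uppercase letter, then lowercase.
    return re.sub(r'(?<!^)(?=[A-Z])', '-', name).lower()

assert camel_to_dash('eventTime') == 'event-time'
assert camel_to_dash('credentialId') == 'credential-id'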
| 46.819277
| 201
| 0.597186
| 1,264
| 11,658
| 5.344146
| 0.160601
| 0.042635
| 0.071058
| 0.092376
| 0.804441
| 0.787269
| 0.766099
| 0.761214
| 0.756773
| 0.756773
| 0
| 0.005164
| 0.269086
| 11,658
| 248
| 202
| 47.008065
| 0.787584
| 0.100618
| 0
| 0.790244
| 0
| 0
| 0.165743
| 0.017492
| 0
| 0
| 0
| 0
| 0.112195
| 1
| 0.029268
| false
| 0.009756
| 0.034146
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
588855a6dbf7a96a50165de369d399f813d28fd7
| 40,903
|
py
|
Python
|
ryu/tests/integrated/test_add_flow_v12_matches.py
|
w180112/ryu
|
aadb6609f585c287b4928db9462baf72c6410718
|
[
"Apache-2.0"
] | 975
|
2015-01-03T02:30:13.000Z
|
2020-05-07T14:01:48.000Z
|
ryu/tests/integrated/test_add_flow_v12_matches.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 66
|
2020-05-22T21:55:42.000Z
|
2022-03-31T12:35:04.000Z
|
ryu/tests/integrated/test_add_flow_v12_matches.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 763
|
2015-01-01T03:38:43.000Z
|
2020-05-06T15:46:09.000Z
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.tests.integrated import tester
LOG = logging.getLogger(__name__)
class RunTest(tester.TestFlowBase):
""" Test case for add flows of Matches
"""
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTest, self).__init__(*args, **kwargs)
self._verify = {}
def add_matches(self, dp, match):
m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
dp.ofproto.OFPFC_ADD,
0, 0, 0, 0xffffffff,
dp.ofproto.OFPP_ANY,
0xffffffff, 0, match, [])
dp.send_msg(m)
def _set_verify(self, headers, value, mask=None,
all_bits_masked=False, type_='int'):
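        # Record the expectation that verify_default checks later: which OXM
        # headers to look for in the flow stats and the value/mask they carry.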
self._verify = {}
self._verify['headers'] = headers
self._verify['value'] = value
self._verify['mask'] = mask
self._verify['all_bits_masked'] = all_bits_masked
self._verify['type'] = type_
def verify_default(self, dp, stats):
type_ = self._verify['type']
headers = self._verify['headers']
value = self._verify['value']
mask = self._verify['mask']
value_masked = self._masked(type_, value, mask)
all_bits_masked = self._verify['all_bits_masked']
field = None
for s in stats:
for f in s.match.fields:
if f.header in headers:
field = f
break
if field is None:
if self._is_all_zero_bit(type_, mask):
return True
return 'Field not found.'
f_value = field.value
if hasattr(field, 'mask'):
f_mask = field.mask
else:
f_mask = None
if (f_value == value) or (f_value == value_masked):
if (f_mask == mask) or (all_bits_masked and f_mask is None):
return True
return "send: %s/%s, reply: %s/%s" \
% (self._cnv_to_str(type_, value, mask, f_value, f_mask))
def _masked(self, type_, value, mask):
if mask is None:
v = value
elif type_ == 'int':
v = value & mask
elif type_ == 'mac':
v = self.haddr_masked(value, mask)
elif type_ == 'ipv4':
v = self.ipv4_masked(value, mask)
elif type_ == 'ipv6':
v = self.ipv6_masked(value, mask)
else:
raise Exception('Unknown type')
return v
def _is_all_zero_bit(self, type_, val):
if type_ == 'int' or type_ == 'ipv4':
return val == 0
elif type_ == 'mac':
for v in val:
if v != b'\x00':
return False
return True
elif type_ == 'ipv6':
for v in val:
if v != 0:
return False
return True
else:
raise Exception('Unknown type')
def _cnv_to_str(self, type_, value, mask, f_value, f_mask):
func = None
if type_ == 'int':
pass
elif type_ == 'mac':
func = self.haddr_to_str
elif type_ == 'ipv4':
func = self.ipv4_to_str
elif type_ == 'ipv6':
func = self.ipv6_to_str
else:
raise Exception('Unknown type')
if func:
value = func(value)
f_value = func(f_value)
if mask:
mask = func(mask)
if f_mask:
f_mask = func(f_mask)
return value, mask, f_value, f_mask
def test_rule_set_dl_dst(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst(dl_dst_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_ff(self, dp):
dl_dst = 'd0:98:79:b4:75:b5'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_f0(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_dst_masked_00(self, dp):
dl_dst = 'e2:7a:09:79:0b:0f'
dl_dst_bin = self.haddr_to_bin(dl_dst)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_dst_masked(dl_dst_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src(dl_src_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_ff(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_f0(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_src_masked_00(self, dp):
dl_src = 'e2:7a:09:79:0b:0f'
dl_src_bin = self.haddr_to_bin(dl_src)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_src_masked(dl_src_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_dl_type_ip(self, dp):
dl_type = ether.ETH_TYPE_IP
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_arp(self, dp):
dl_type = ether.ETH_TYPE_ARP
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_vlan(self, dp):
dl_type = ether.ETH_TYPE_8021Q
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_ipv6(self, dp):
dl_type = ether.ETH_TYPE_IPV6
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_dl_type_lacp(self, dp):
dl_type = ether.ETH_TYPE_SLOW
headers = [dp.ofproto.OXM_OF_ETH_TYPE]
self._set_verify(headers, dl_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
self.add_matches(dp, match)
def test_rule_set_ip_dscp(self, dp):
ip_dscp = 36
dl_type = ether.ETH_TYPE_IP
headers = [dp.ofproto.OXM_OF_IP_DSCP]
self._set_verify(headers, ip_dscp)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_dscp(ip_dscp)
self.add_matches(dp, match)
def test_rule_set_vlan_vid(self, dp):
vlan_vid = 0x4ef
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(vlan_vid)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_ff(self, dp):
vlan_vid = 0x4ef
mask = 0xfff
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask, True)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_f0(self, dp):
vlan_vid = 0x4ef
mask = 0xff0
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_vid_masked_00(self, dp):
vlan_vid = 0x4ef
mask = 0x000
headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
self._set_verify(headers, vlan_vid, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid_masked(vlan_vid, mask)
self.add_matches(dp, match)
def test_rule_set_vlan_pcp(self, dp):
vlan_vid = 0x4ef
vlan_pcp = 5
headers = [dp.ofproto.OXM_OF_VLAN_PCP]
self._set_verify(headers, vlan_pcp)
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(vlan_vid)
match.set_vlan_pcp(vlan_pcp)
self.add_matches(dp, match)
def test_rule_set_ip_ecn(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_ecn = 3
headers = [dp.ofproto.OXM_OF_IP_ECN]
self._set_verify(headers, ip_ecn)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_ecn(ip_ecn)
self.add_matches(dp, match)
def test_rule_set_ip_proto_icmp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_tcp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_udp(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_route(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ROUTING
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_frag(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_FRAGMENT
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_icmp(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_none(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_NONE
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ip_proto_ipv6_dstopts(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_DSTOPTS
headers = [dp.ofproto.OXM_OF_IP_PROTO]
self._set_verify(headers, ip_proto)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
self.add_matches(dp, match)
def test_rule_set_ipv4_src(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src(src_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_32(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_24(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_src_masked_0(self, dp):
dl_type = ether.ETH_TYPE_IP
src = '192.168.196.250'
src_int = self.ipv4_to_int(src)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
self._set_verify(headers, src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_src_masked(src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst(dst_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_32(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_24(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv4_dst_masked_0(self, dp):
dl_type = ether.ETH_TYPE_IP
dst = '192.168.54.155'
dst_int = self.ipv4_to_int(dst)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
self._set_verify(headers, dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv4_dst_masked(dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_tcp_src(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
tp_src = 1103
headers = [dp.ofproto.OXM_OF_TCP_SRC]
self._set_verify(headers, tp_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_tcp_src(tp_src)
self.add_matches(dp, match)
def test_rule_set_tcp_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_TCP
tp_dst = 236
headers = [dp.ofproto.OXM_OF_TCP_DST]
self._set_verify(headers, tp_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_tcp_dst(tp_dst)
self.add_matches(dp, match)
def test_rule_set_udp_src(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
tp_src = 56617
headers = [dp.ofproto.OXM_OF_UDP_SRC]
self._set_verify(headers, tp_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_udp_src(tp_src)
self.add_matches(dp, match)
def test_rule_set_udp_dst(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_UDP
tp_dst = 61278
headers = [dp.ofproto.OXM_OF_UDP_DST]
self._set_verify(headers, tp_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_udp_dst(tp_dst)
self.add_matches(dp, match)
def test_rule_set_icmpv4_type(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
icmp_type = 8
headers = [dp.ofproto.OXM_OF_ICMPV4_TYPE]
self._set_verify(headers, icmp_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv4_type(icmp_type)
self.add_matches(dp, match)
def test_rule_set_icmpv4_code(self, dp):
dl_type = ether.ETH_TYPE_IP
ip_proto = inet.IPPROTO_ICMP
icmp_type = 9
icmp_code = 16
headers = [dp.ofproto.OXM_OF_ICMPV4_CODE]
self._set_verify(headers, icmp_code)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv4_type(icmp_type)
match.set_icmpv4_code(icmp_code)
self.add_matches(dp, match)
def test_rule_set_arp_opcode(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_op = 1
headers = [dp.ofproto.OXM_OF_ARP_OP]
self._set_verify(headers, arp_op)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_opcode(arp_op)
self.add_matches(dp, match)
def test_rule_set_arp_spa(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa(nw_src_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_32(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_24(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_spa_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_src = '192.168.222.57'
nw_src_int = self.ipv4_to_int(nw_src)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_spa_masked(nw_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa(nw_dst_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_32(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '255.255.255.255'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, True, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_24(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '255.255.255.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_tpa_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
nw_dst = '192.168.198.233'
nw_dst_int = self.ipv4_to_int(nw_dst)
mask = '0.0.0.0'
mask_int = self.ipv4_to_int(mask)
headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tpa_masked(nw_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_arp_sha(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha(arp_sha_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_sha_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_sha = '3e:ec:13:9b:f3:0b'
arp_sha_bin = self.haddr_to_bin(arp_sha)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_sha_masked(arp_sha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha(arp_tha_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = 'ff:ff:ff:ff:ff:ff'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, True, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = 'ff:ff:ff:ff:ff:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_arp_tha_masked_00(self, dp):
dl_type = ether.ETH_TYPE_ARP
arp_tha = '83:6c:21:52:49:68'
arp_tha_bin = self.haddr_to_bin(arp_tha)
mask = '00:00:00:00:00:00'
mask_bin = self.haddr_to_bin(mask)
headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_arp_tha_masked(arp_tha_bin, mask_bin)
self.add_matches(dp, match)
def test_rule_set_ipv6_src(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
ipv6_src_int = self.ipv6_to_int(ipv6_src)
headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
self._set_verify(headers, ipv6_src_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_src(ipv6_src_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
ipv6_src_int = self.ipv6_to_int(ipv6_src)
mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
self._set_verify(headers, ipv6_src_int, mask_int, True, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_src_masked(ipv6_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
ipv6_src_int = self.ipv6_to_int(ipv6_src)
mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
self._set_verify(headers, ipv6_src_int, mask_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_src_masked(ipv6_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_src_masked_00(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
ipv6_src_int = self.ipv6_to_int(ipv6_src)
mask = '0:0:0:0:0:0:0:0'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
self._set_verify(headers, ipv6_src_int, mask_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_src_masked(ipv6_src_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_dst(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
self._set_verify(headers, ipv6_dst_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_dst(ipv6_dst_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
self._set_verify(headers, ipv6_dst_int, mask_int, True, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
self._set_verify(headers, ipv6_dst_int, mask_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_dst_masked_00(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
mask = '0:0:0:0:0:0:0:0'
mask_int = self.ipv6_to_int(mask)
headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
self._set_verify(headers, ipv6_dst_int, mask_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_flabel(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_label = 0xc5384
headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
dp.ofproto.OXM_OF_IPV6_FLABEL_W]
self._set_verify(headers, ipv6_label)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_flabel(ipv6_label)
self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_ff(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_label = 0xc5384
mask = 0xfffff
headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
dp.ofproto.OXM_OF_IPV6_FLABEL_W]
self._set_verify(headers, ipv6_label, mask, True)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_flabel_masked(ipv6_label, mask)
self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_f0(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_label = 0xc5384
mask = 0xffff0
headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
dp.ofproto.OXM_OF_IPV6_FLABEL_W]
self._set_verify(headers, ipv6_label, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_flabel_masked(ipv6_label, mask)
self.add_matches(dp, match)
def test_rule_set_ipv6_flabel_masked_00(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ipv6_label = 0xc5384
mask = 0x0
headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
dp.ofproto.OXM_OF_IPV6_FLABEL_W]
self._set_verify(headers, ipv6_label, mask)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ipv6_flabel_masked(ipv6_label, mask)
self.add_matches(dp, match)
def test_rule_set_icmpv6_type(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
icmp_type = 129
headers = [dp.ofproto.OXM_OF_ICMPV6_TYPE]
self._set_verify(headers, icmp_type)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv6_type(icmp_type)
self.add_matches(dp, match)
def test_rule_set_icmpv6_code(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
icmp_type = 138
icmp_code = 1
headers = [dp.ofproto.OXM_OF_ICMPV6_CODE]
self._set_verify(headers, icmp_code)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv6_type(icmp_type)
match.set_icmpv6_code(icmp_code)
self.add_matches(dp, match)
def test_rule_set_ipv6_nd_target(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
icmp_type = 135
target = "5420:db3f:921b:3e33:2791:98f:dd7f:2e19"
target_int = self.ipv6_to_int(target)
headers = [dp.ofproto.OXM_OF_IPV6_ND_TARGET]
self._set_verify(headers, target_int, type_='ipv6')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv6_type(icmp_type)
match.set_ipv6_nd_target(target_int)
self.add_matches(dp, match)
def test_rule_set_ipv6_nd_sll(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
icmp_type = 135
nd_sll = "93:6d:d0:d4:e8:36"
nd_sll_bin = self.haddr_to_bin(nd_sll)
headers = [dp.ofproto.OXM_OF_IPV6_ND_SLL]
self._set_verify(headers, nd_sll_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv6_type(icmp_type)
match.set_ipv6_nd_sll(nd_sll_bin)
self.add_matches(dp, match)
def test_rule_set_ipv6_nd_tll(self, dp):
dl_type = ether.ETH_TYPE_IPV6
ip_proto = inet.IPPROTO_ICMPV6
icmp_type = 136
nd_tll = "18:f6:66:b6:f1:b3"
nd_tll_bin = self.haddr_to_bin(nd_tll)
headers = [dp.ofproto.OXM_OF_IPV6_ND_TLL]
self._set_verify(headers, nd_tll_bin, type_='mac')
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_ip_proto(ip_proto)
match.set_icmpv6_type(icmp_type)
match.set_ipv6_nd_tll(nd_tll_bin)
self.add_matches(dp, match)
def test_rule_set_mpls_label(self, dp):
dl_type = 0x8847
label = 2144
headers = [dp.ofproto.OXM_OF_MPLS_LABEL]
self._set_verify(headers, label)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_mpls_label(label)
self.add_matches(dp, match)
def test_rule_set_mpls_tc(self, dp):
dl_type = 0x8847
tc = 3
headers = [dp.ofproto.OXM_OF_MPLS_TC]
self._set_verify(headers, tc)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(dl_type)
match.set_mpls_tc(tc)
self.add_matches(dp, match)
def is_supported(self, t):
# Open vSwitch 1.10 does not support MPLS yet.
unsupported = [
'test_rule_set_mpls_label',
'test_rule_set_mpls_tc',
]
for u in unsupported:
            if u in t:
return False
return True
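# A standalone sketch (hypothetical, not part of the test harness above) of
# the masked-match acceptance rule that verify_default and _masked implement
# for the 'int' type: a reply field matches if it equals either the value as
# sent or that value ANDed with the mask.
def int_field_matches(sent_value, mask, reply_value):
    value_masked = sent_value if mask is None else sent_value & mask
    return reply_value in (sent_value, value_masked)

# A VLAN VID of 0x4ef masked with 0xff0 may legitimately come back as 0x4e0.
assert int_field_matches(0x4ef, 0xff0, 0x4e0)
assert int_field_matches(0x4ef, None, 0x4ef)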
| 34.085833
| 77
| 0.647948
| 6,370
| 40,903
| 3.753061
| 0.048823
| 0.077927
| 0.063245
| 0.073786
| 0.887188
| 0.866692
| 0.844941
| 0.83645
| 0.832894
| 0.830133
| 0
| 0.03688
| 0.252231
| 40,903
| 1,199
| 78
| 34.114262
| 0.744753
| 0.017383
| 0
| 0.703463
| 0
| 0
| 0.044586
| 0.013069
| 0
| 0
| 0.002913
| 0
| 0
| 1
| 0.093074
| false
| 0.001082
| 0.005411
| 0
| 0.114719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
588994b72a4f157cc2c0270790cbe09c217372b5
| 17,811
|
py
|
Python
|
webapp/tests/forms/validations/test_date_validation.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 20
|
2021-07-02T07:49:08.000Z
|
2022-03-18T22:26:10.000Z
|
webapp/tests/forms/validations/test_date_validation.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 555
|
2021-06-28T15:35:15.000Z
|
2022-03-31T11:51:55.000Z
|
webapp/tests/forms/validations/test_date_validation.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 1
|
2021-07-04T20:34:12.000Z
|
2021-07-04T20:34:12.000Z
|
import unittest
from werkzeug.datastructures import MultiDict
from flask_babel import _
from app.forms.validations.date_validations import ValidDateOfBirth, ValidDateOfDeath, ValidDateOfDivorce, ValidDateOfMarriage
from app.forms.fields import SteuerlotseDateField
from app.forms import SteuerlotseBaseForm
class DateForm(SteuerlotseBaseForm):
date_field = SteuerlotseDateField()
class TestValidDateOfBirth(unittest.TestCase):
def setUp(self):
self.form = DateForm()
self.validator = ValidDateOfBirth()
def test_date_of_birth_is_valid(self):
"""
GIVEN a valid date 1.1.2020
WHEN validation executed
THEN no ValueErrors expected
"""
# Arrange
self.is_valid = False
        self.date_input = ['1', '1', '2020']
        self.form.process(formdata=MultiDict({'date_field': self.date_input}))
self.validation_error = ''
# Act
try:
self.validator(self.form, self.form.date_field)
self.is_valid = True
except Exception as ex:
self.validation_error = str(ex)
# Assert
self.assertTrue(self.is_valid, f'Error raised:{self.validation_error}')
def test_date_of_birth_too_far_in_past_throws_ValueError(self):
"""
GIVEN a date before 1.1.1900
WHEN validation executed
THEN ValueError with message validate.date-of-to-far-in-past expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '1899']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-to-far-in-past'))
def test_date_of_birth_in_the_future_throws_ValueError(self):
"""
        GIVEN a date in the future, 1.1.9999
WHEN validation executed
THEN ValueError with message validate.date-of-in-the-future expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '9999']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-in-the-future'))
def test_invalid_date_of_birth_throws_ValueError(self):
"""
        GIVEN an invalid date 99.99.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-incorrect expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['99', '99', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-incorrect'))
def test_incomplete_date_of_birth_throws_ValueError(self):
"""
        GIVEN an incomplete date __.__.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-birth-incomplete expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['', '', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-birth-incomplete'))
class TestValidDateOfMarriage(unittest.TestCase):
def setUp(self):
self.form = DateForm()
self.validator = ValidDateOfMarriage()
def test_date_of_marriage_is_valid(self):
"""
GIVEN a valid date 1.1.2020
WHEN validation executed
THEN no ValueErrors expected
"""
# Arrange
self.is_valid = False
        self.date_input = ['1', '1', '2020']
        self.form.process(formdata=MultiDict({'date_field': self.date_input}))
self.validation_error = ''
# Act
try:
self.validator(self.form, self.form.date_field)
self.is_valid = True
except Exception as ex:
self.validation_error = str(ex)
# Assert
self.assertTrue(self.is_valid, f'Error raised:{self.validation_error}')
def test_date_of_marriage_too_far_in_past_throws_ValueError(self):
"""
GIVEN a date before 1.1.1900
WHEN validation executed
THEN ValueError with message validate.date-of-to-far-in-past expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '1899']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-to-far-in-past'))
def test_date_of_marriage_in_the_future_throws_ValueError(self):
"""
        GIVEN a date in the future, 1.1.9999
WHEN validation executed
THEN ValueError with message validate.date-of-in-the-future expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '9999']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-in-the-future'))
def test_invalid_date_of_marriage_throws_ValueError(self):
"""
        GIVEN an invalid date 99.99.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-incorrect expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['99', '99', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-incorrect'))
def test_incomplete_date_of_marriage_throws_ValueError(self):
"""
        GIVEN an incomplete date __.__.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-marriage-incomplete expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['', '', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-marriage-incomplete'))
class TestValidDateOfDivorce(unittest.TestCase):
def setUp(self):
self.form = DateForm()
self.validator = ValidDateOfDivorce()
def test_date_of_divorce_is_valid(self):
"""
GIVEN a valid date 1.1.2020
WHEN validation executed
THEN no ValueErrors expected
"""
# Arrange
self.is_valid = False
        self.date_input = ['1', '1', '2020']
        self.form.process(formdata=MultiDict({'date_field': self.date_input}))
self.validation_error = ''
# Act
try:
self.validator(self.form, self.form.date_field)
self.is_valid = True
except Exception as ex:
self.validation_error = str(ex)
# Assert
self.assertTrue(self.is_valid, f'Error raised:{self.validation_error}')
def test_date_of_divorce_too_far_in_past_throws_ValueError(self):
"""
GIVEN a date before 1.1.1900
WHEN validation executed
THEN ValueError with message validate.date-of-to-far-in-past expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '1899']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-to-far-in-past'))
def test_date_of_divorce_in_the_future_throws_ValueError(self):
"""
GIVEN a date in the future 9.9.9999
WHEN validation executed
THEN ValueError with message validate.date-of-in-the-future expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '9999']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-in-the-future'))
def test_invalid_date_of_divorce_throws_ValueError(self):
"""
        GIVEN an invalid date 99.99.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-incorrect expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['99', '99', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-incorrect'))
def test_incomplete_date_of_divorce_throws_ValueError(self):
"""
        GIVEN an incomplete date __.__.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-incomplete expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['', '', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-incomplete'))
class TestValidDateOfDeath(unittest.TestCase):
def setUp(self):
self.form = DateForm()
self.validator = ValidDateOfDeath()
def test_date_of_death_is_valid(self):
"""
GIVEN a valid date 1.1.2020
WHEN validation executed
THEN no ValueErrors expected
"""
# Arrange
self.is_valid = False
        self.date_input = ['1', '1', '2020']
        self.form.process(formdata=MultiDict({'date_field': self.date_input}))
self.validation_error = ''
# Act
try:
self.validator(self.form, self.form.date_field)
self.is_valid = True
except Exception as ex:
self.validation_error = str(ex)
# Assert
self.assertTrue(self.is_valid, f'Error raised:{self.validation_error}')
def test_date_of_death_too_far_in_past_throws_ValueError(self):
"""
GIVEN a date before 1.1.1900
WHEN validation executed
THEN ValueError with message validate.date-of-to-far-in-past expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '1899']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-to-far-in-past'))
def test_date_of_death_in_the_future_throws_ValueError(self):
"""
        GIVEN a date in the future, 1.1.9999
WHEN validation executed
THEN ValueError with message validate.date-of-in-the-future expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['1', '1', '9999']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-in-the-future'))
def test_invalid_date_of_death_throws_ValueError(self):
"""
        GIVEN an invalid date 99.99.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-incorrect expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['99', '99', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-incorrect'))
def test_incomplete_date_of_death_throws_ValueError(self):
"""
        GIVEN an incomplete date __.__.2020
        WHEN validation executed
        THEN ValueError with message validate.date-of-death-incomplete expected
"""
# Arrange
self.is_valid = True
self.wrong_date_input = ['', '', '2020']
self.form.process(formdata=MultiDict({'date_field': self.wrong_date_input}))
# Act
try:
self.validator(self.form, self.form.date_field)
except ValueError as ex:
self.is_valid = False
self.validation_error = str(ex)
# Assert
self.assertFalse(self.is_valid, 'ValueError expected')
self.assertEqual(self.validation_error, _('validate.date-of-death-incomplete'))
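# --- Illustrative sketch (editor addition, not part of the original suite) ---
# Every test above repeats the same Arrange/Act/Assert steps. A hedged helper
# could collapse that boilerplate; `assert_validation_fails` and its parameter
# names are hypothetical, introduced here purely for illustration.
def assert_validation_fails(test_case, form, validator, date_input, expected_message):
    """Assert that `validator` raises ValueError carrying `expected_message`."""
    form.process(formdata=MultiDict({'date_field': date_input}))
    with test_case.assertRaises(ValueError) as ctx:
        validator(form, form.date_field)
    test_case.assertEqual(str(ctx.exception), expected_message)
# Example usage inside a test method:
#     assert_validation_fails(self, self.form, self.validator,
#                             ['', '', '2020'], _('validate.date-of-death-incomplete'))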
| 35.981818
| 126
| 0.57835
| 1,945
| 17,811
| 5.092545
| 0.047815
| 0.051691
| 0.066633
| 0.072691
| 0.948107
| 0.928622
| 0.928622
| 0.928622
| 0.923574
| 0.923574
| 0
| 0.020062
| 0.328336
| 17,811
| 495
| 127
| 35.981818
| 0.807908
| 0.165796
| 0
| 0.836066
| 0
| 0
| 0.088859
| 0.042609
| 0
| 0
| 0
| 0
| 0.147541
| 1
| 0.098361
| false
| 0
| 0.02459
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54503da98c8c6238936771c8fc653d6da02a0b15
| 4,059
|
py
|
Python
|
sijuiacion_lang/sijuiacion.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 21
|
2019-10-13T14:11:32.000Z
|
2021-12-14T02:42:12.000Z
|
sijuiacion_lang/sijuiacion.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-01-07T13:14:46.000Z
|
2020-01-09T16:58:07.000Z
|
sijuiacion_lang/sijuiacion.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-08-13T16:17:09.000Z
|
2020-08-13T16:17:09.000Z
|
from enum import Enum, auto as _auto
import abc
import typing as t
from dataclasses import dataclass
from sijuiacion_lang.support import *
class Instr:
pass
class UOp(Enum):
POSITIVE = _auto()
NEGATIVE = _auto()
NOT = _auto()
INVERT = _auto()
pass
class BinOp(Enum):
POWER = _auto()
MULTIPLY = _auto()
MATRIX_MULTIPLY = _auto()
FLOOR_DIVIDE = _auto()
TRUE_DIVIDE = _auto()
MODULO = _auto()
ADD = _auto()
SUBTRACT = _auto()
SUBSCR = _auto()
LSHIFT = _auto()
RSHIFT = _auto()
AND = _auto()
XOR = _auto()
OR = _auto()
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Load(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Store(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Deref(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class RefSet(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Glob(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class GlobSet(Instr):
name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Const(Instr):
val:object
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Extern(Instr):
code:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class SimpleRaise(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Unpack(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Pop(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class ROT(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class DUP(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Goto(Instr):
label_name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class GotoEq(Instr):
label_name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class GotoNEq(Instr):
label_name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Label(Instr):
label_name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class BlockAddr(Instr):
label_name:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Indir(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Switch(Instr):
table:t.Dict[int, str]
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Bin(Instr):
op:BinOp
pass
@adt_recog
@dataclass(frozen=True, order=True)
class IBin(Instr):
op:BinOp
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Un(Instr):
op:UOp
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Cmp(Instr):
op:Compare
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Attr(Instr):
attr:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class AttrSet(Instr):
attr:str
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Item(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class ItemSet(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Call(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Print(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class BuildList(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class BuildTuple(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class ListAppend(Instr):
n:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Return(Instr):
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Line(Instr):
no:int
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Defun(Instr):
doc:str
filename:str
free:t.List[str]
name:str
args:t.List[str]
suite:t.List[t.Union[Line,Instr]]
pass
@adt_recog
@dataclass(frozen=True, order=True)
class Mod:
filename:str
tops:t.List[Instr]
pass
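# --- Illustrative sketch (editor addition, not part of the original module) ---
# A hedged example of assembling the instruction ADTs above into a module
# value; the sequence roughly encodes `print(1 + 2)`. The filename
# "example.sij" is hypothetical, and this assumes @adt_recog leaves the
# dataclass constructors intact.
example_mod = Mod(
    filename="example.sij",
    tops=[
        Line(no=1),
        Const(val=1),       # push the constant 1
        Const(val=2),       # push the constant 2
        Bin(op=BinOp.ADD),  # pop both operands, push their sum
        Print(),            # print the value on top of the stack
    ],
)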
| 13.666667
| 37
| 0.681449
| 569
| 4,059
| 4.746924
| 0.172232
| 0.09589
| 0.164384
| 0.287671
| 0.733802
| 0.733802
| 0.733802
| 0.733802
| 0.733802
| 0.66716
| 0
| 0
| 0.198325
| 4,059
| 296
| 38
| 13.712838
| 0.830055
| 0
| 0
| 0.649533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.186916
| 0.023364
| 0
| 0.462617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
54524a8425011b5df404134db1843d31bc4e7ab6
| 4,478
|
py
|
Python
|
tests/test_mstomp.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 262
|
2020-02-28T20:42:27.000Z
|
2022-03-30T14:02:28.000Z
|
tests/test_mstomp.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 79
|
2020-03-01T01:42:14.000Z
|
2022-03-30T07:15:48.000Z
|
tests/test_mstomp.py
|
MORE-EU/matrixprofile
|
7c598385f7723f337d7bf7d3f90cffb690c6b0df
|
[
"Apache-2.0"
] | 56
|
2020-03-03T14:56:27.000Z
|
2022-03-22T07:18:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatibility boilerplate
import os
import pytest
import numpy as np
from matrixprofile.algorithms.mstomp import mstomp
def test_mstomp_window_size_less_than_4():
ts = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [8, 7, 6, 5, 4, 3, 2, 1]])
w = 2
with pytest.raises(ValueError) as excinfo:
mstomp(ts, w)
assert 'window size must be at least 4.' in str(excinfo.value)
def test_mstomp_time_series_too_small():
ts = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [8, 7, 6, 5, 4, 3, 2, 1]])
w = 8
with pytest.raises(ValueError) as excinfo:
mstomp(ts, w)
assert 'Time series is too short' in str(excinfo.value)
def test_mstomp_single_dimension():
ts = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0])
w = 4
desired_mp = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]])
desired_pi = np.array([[4, 5, 6, 7, 0, 1, 2, 3, 0]])
desired_lmp = np.array([[np.inf, np.inf, np.inf, 2.82842712, 0, 0, 0, 0, 0]])
desired_lpi = np.array([[0, 0, 0, 0, 0, 1, 2, 3, 0]])
desired_rmp = np.array([[0, 0, 0, 0, 0, 2.82842712, np.inf, np.inf, np.inf]])
desired_rpi = np.array([[4, 5, 6, 7, 8, 8, 0, 0, 0]])
profile = mstomp(ts, w, n_jobs=1)
np.testing.assert_almost_equal(profile['mp'], desired_mp)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
np.testing.assert_almost_equal(profile['lmp'], desired_lmp)
np.testing.assert_almost_equal(profile['lpi'], desired_lpi)
np.testing.assert_almost_equal(profile['rmp'], desired_rmp)
np.testing.assert_almost_equal(profile['rpi'], desired_rpi)
def test_mstomp_multi_dimension():
ts = np.array([[0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1]])
w = 4
desired_mp = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 9.19401687e-01, 9.19401687e-01, 2.98023224e-08, 0, 9.19401687e-01, 9.19401687e-01, 9.19401687e-01]])
desired_pi = np.array([[4, 5, 6, 7, 0, 1, 2, 3, 0], [4, 5, 6, 7, 0, 1, 2, 3, 0]])
desired_pd = [
np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]]),
np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1]])
]
profile = mstomp(ts, w, return_dimension=True, n_jobs=1)
np.testing.assert_almost_equal(profile['mp'], desired_mp)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
for i in range(len(ts)):
np.testing.assert_almost_equal(profile['pd'][i], desired_pd[i])
def test_mstomp_single_dimension_multi_threaded():
ts = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0])
w = 4
desired_mp = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]])
desired_pi = np.array([[4, 5, 6, 7, 0, 1, 2, 3, 0]])
desired_lmp = np.array([[np.inf, np.inf, np.inf, 2.82842712, 0, 0, 0, 0, 0]])
desired_lpi = np.array([[0, 0, 0, 0, 0, 1, 2, 3, 0]])
desired_rmp = np.array([[0, 0, 0, 0, 0, 2.82842712, np.inf, np.inf, np.inf]])
desired_rpi = np.array([[4, 5, 6, 7, 8, 8, 0, 0, 0]])
profile = mstomp(ts, w, n_jobs=-1)
np.testing.assert_almost_equal(profile['mp'], desired_mp)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
np.testing.assert_almost_equal(profile['lmp'], desired_lmp)
np.testing.assert_almost_equal(profile['lpi'], desired_lpi)
np.testing.assert_almost_equal(profile['rmp'], desired_rmp)
np.testing.assert_almost_equal(profile['rpi'], desired_rpi)
def test_mstomp_multi_dimension_multi_threaded():
ts = np.array([[0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1]])
w = 4
desired_mp = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 9.19401687e-01, 9.19401687e-01, 2.98023224e-08, 0, 9.19401687e-01, 9.19401687e-01, 9.19401687e-01]])
desired_pi = np.array([[4, 5, 6, 7, 0, 1, 2, 3, 0], [4, 5, 6, 7, 0, 1, 2, 3, 0]])
desired_pd = [
np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]]),
np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1]])
]
profile = mstomp(ts, w, return_dimension=True, n_jobs=-1)
np.testing.assert_almost_equal(profile['mp'], desired_mp)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
for i in range(len(ts)):
np.testing.assert_almost_equal(profile['pd'][i], desired_pd[i])
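# --- Illustrative sketch (editor addition, not part of the original tests) ---
# A hedged recap of the profile dictionary these tests exercise: mstomp
# returns the matrix profile ('mp'), profile index ('pi'), their left/right
# variants ('lmp'/'lpi' and 'rmp'/'rpi'), and a per-dimension breakdown 'pd'
# when return_dimension=True, as the assertions above show.
if __name__ == '__main__':
    ts = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0])
    profile = mstomp(ts, 4, n_jobs=1)
    print(profile['mp'].shape, profile['pi'].shape)  # (1, 9) (1, 9)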
| 37.630252
| 116
| 0.599151
| 825
| 4,478
| 3.09697
| 0.118788
| 0.08454
| 0.093933
| 0.100196
| 0.852055
| 0.84227
| 0.84227
| 0.818787
| 0.818787
| 0.818787
| 0
| 0.138748
| 0.208129
| 4,478
| 119
| 117
| 37.630252
| 0.581782
| 0.017642
| 0
| 0.705882
| 0
| 0
| 0.02388
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.070588
| false
| 0
| 0.094118
| 0
| 0.164706
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
548fe6b0af158942e855b02a19d763ab1f6f5685
| 5,974
|
py
|
Python
|
pyresponse/explicit_equations_partial.py
|
berquist/pyresponse
|
3267b0ca1e5b2e638cd2388532897f2749af8397
|
[
"BSD-3-Clause"
] | 9
|
2017-09-26T07:02:42.000Z
|
2021-12-15T15:01:16.000Z
|
pyresponse/explicit_equations_partial.py
|
berquist/pyresponse
|
3267b0ca1e5b2e638cd2388532897f2749af8397
|
[
"BSD-3-Clause"
] | 12
|
2018-02-17T22:32:41.000Z
|
2021-09-19T03:25:13.000Z
|
pyresponse/explicit_equations_partial.py
|
berquist/pyresponse
|
3267b0ca1e5b2e638cd2388532897f2749af8397
|
[
"BSD-3-Clause"
] | 4
|
2018-03-25T01:16:16.000Z
|
2022-02-28T07:55:52.000Z
|
r"""Explicit equations for orbital Hessian terms using partially-transformed MO-basis two-electron integrals, *e.g.*, :math:`(ia|jb), (ij|ab)`."""
import numpy as np
from pyresponse.utils import form_vec_energy_differences
def form_rpa_a_matrix_mo_singlet_partial(
E_MO: np.ndarray, TEI_MO_iajb: np.ndarray, TEI_MO_ijab: np.ndarray
) -> np.ndarray:
r"""Form the A (CIS) matrix in the MO basis. [singlet]
The equation for element :math:`\{ia,jb\}` is
:math:`\left<aj||ib\right> = \left<aj|ib\right> -
\left<aj|bi\right> = [ai|jb] - [ab|ji] = 2(ai|jb) - (ab|ji)`. It
also includes the virt-occ energy difference on the diagonal.
"""
shape_iajb = TEI_MO_iajb.shape
shape_ijab = TEI_MO_ijab.shape
assert len(shape_iajb) == len(shape_ijab) == 4
assert shape_iajb[0] == shape_iajb[2] == shape_ijab[0] == shape_ijab[1]
assert shape_iajb[1] == shape_iajb[3] == shape_ijab[2] == shape_ijab[3]
nocc = shape_iajb[0]
nvirt = shape_iajb[1]
norb = nocc + nvirt
assert len(E_MO.shape) == 2
assert E_MO.shape[0] == E_MO.shape[1] == norb
nov = nocc * nvirt
ediff = form_vec_energy_differences(np.diag(E_MO)[:nocc], np.diag(E_MO)[nocc:])
A = 2 * TEI_MO_iajb
A -= TEI_MO_ijab.swapaxes(1, 2)
A.shape = (nov, nov)
A += np.diag(ediff)
return A
def form_rpa_a_matrix_mo_triplet_partial(E_MO: np.ndarray, TEI_MO_ijab: np.ndarray) -> np.ndarray:
r"""Form the A (CIS) matrix in the MO basis. [triplet]
The equation for element :math:`\{ia,jb\}` is :math:`-
\left<aj|bi\right> = - [ab|ji] = - (ab|ji)`. It also includes the
virt-occ energy difference on the diagonal.
"""
shape_ijab = TEI_MO_ijab.shape
assert len(shape_ijab) == 4
assert shape_ijab[0] == shape_ijab[1]
assert shape_ijab[2] == shape_ijab[3]
nocc = shape_ijab[0]
nvirt = shape_ijab[2]
norb = nocc + nvirt
assert len(E_MO.shape) == 2
assert E_MO.shape[0] == E_MO.shape[1] == norb
nov = nocc * nvirt
ediff = form_vec_energy_differences(np.diag(E_MO)[:nocc], np.diag(E_MO)[nocc:])
A = np.zeros((nocc, nvirt, nocc, nvirt))
A -= TEI_MO_ijab.swapaxes(1, 2)
A.shape = (nov, nov)
A += np.diag(ediff)
return A
def form_rpa_b_matrix_mo_singlet_partial(TEI_MO_iajb: np.ndarray) -> np.ndarray:
r"""Form the B matrix for RPA in the MO basis. [singlet]
The equation for element :math:`\{ia,jb\}` is
:math:`\left<ab||ij\right> = \left<ab|ij\right> -
\left<ab|ji\right> = [ai|bj] - [aj|bi] = 2(ai|bj) - (aj|bi)`.
"""
shape_iajb = TEI_MO_iajb.shape
assert len(shape_iajb) == 4
assert shape_iajb[0] == shape_iajb[2]
assert shape_iajb[1] == shape_iajb[3]
nocc = shape_iajb[0]
nvirt = shape_iajb[1]
nov = nocc * nvirt
B = 2 * TEI_MO_iajb
B -= TEI_MO_iajb.swapaxes(1, 3)
B.shape = (nov, nov)
return -B
def form_rpa_b_matrix_mo_triplet_partial(TEI_MO_iajb: np.ndarray) -> np.ndarray:
r"""Form the B matrix for RPA in the MO basis. [triplet]
The equation for element :math:`\{ia,jb\}` is :math:`????`.
"""
shape_iajb = TEI_MO_iajb.shape
assert len(shape_iajb) == 4
assert shape_iajb[0] == shape_iajb[2]
assert shape_iajb[1] == shape_iajb[3]
nocc = shape_iajb[0]
nvirt = shape_iajb[1]
nov = nocc * nvirt
B = np.zeros((nocc, nvirt, nocc, nvirt))
B -= TEI_MO_iajb.swapaxes(1, 3)
B.shape = (nov, nov)
return -B
def form_rpa_a_matrix_mo_singlet_ss_partial(
E_MO: np.ndarray, TEI_MO_iajb: np.ndarray, TEI_MO_ijab: np.ndarray
) -> np.ndarray:
r"""Form the same-spin part of the A (CIS) matrix in the MO
basis. [singlet]
The equation for element :math:`\{ia,jb\}` is :math:`????`.
"""
shape_iajb = TEI_MO_iajb.shape
shape_ijab = TEI_MO_ijab.shape
assert len(shape_iajb) == len(shape_ijab) == 4
assert shape_iajb[0] == shape_iajb[2] == shape_ijab[0] == shape_ijab[1]
assert shape_iajb[1] == shape_iajb[3] == shape_ijab[2] == shape_ijab[3]
nocc = shape_iajb[0]
nvirt = shape_iajb[1]
norb = nocc + nvirt
assert len(E_MO.shape) == 2
assert E_MO.shape[0] == E_MO.shape[1] == norb
nov = nocc * nvirt
ediff = form_vec_energy_differences(np.diag(E_MO)[:nocc], np.diag(E_MO)[nocc:])
A = TEI_MO_iajb.copy()
A -= TEI_MO_ijab.swapaxes(1, 2)
A.shape = (nov, nov)
A += np.diag(ediff)
return A
def form_rpa_a_matrix_mo_singlet_os_partial(TEI_MO_iajb_xxyy: np.ndarray) -> np.ndarray:
r"""Form the opposite-spin part of the A (CIS) matrix in the MO
basis. [singlet]
The equation for element :math:`\{ia,jb\}` is :math:`????`.
"""
shape = TEI_MO_iajb_xxyy.shape
assert len(shape) == 4
nocc_x, nvirt_x, nocc_y, nvirt_y = shape
nov_x = nocc_x * nvirt_x
nov_y = nocc_y * nvirt_y
A = TEI_MO_iajb_xxyy.copy()
A.shape = (nov_x, nov_y)
return A
def form_rpa_b_matrix_mo_singlet_ss_partial(TEI_MO_iajb: np.ndarray) -> np.ndarray:
r"""Form the same-spin part of the RPA B matrix in the MO
basis. [singlet]
The equation for element :math:`\{ia,jb\}` is :math:`????`.
"""
shape_iajb = TEI_MO_iajb.shape
assert len(shape_iajb) == 4
assert shape_iajb[0] == shape_iajb[2]
assert shape_iajb[1] == shape_iajb[3]
nocc = shape_iajb[0]
nvirt = shape_iajb[1]
nov = nocc * nvirt
B = TEI_MO_iajb.copy()
B -= TEI_MO_iajb.swapaxes(1, 3)
B.shape = (nov, nov)
return -B
def form_rpa_b_matrix_mo_singlet_os_partial(TEI_MO_iajb_xxyy: np.ndarray) -> np.ndarray:
r"""Form the opposite-spin part of the RPA B matrix in the MO
basis. [singlet]
The equation for element :math:`\{ia,jb\}` is :math:`????`.
"""
shape = TEI_MO_iajb_xxyy.shape
assert len(shape) == 4
nocc_x, nvirt_x, nocc_y, nvirt_y = shape
nov_x = nocc_x * nvirt_x
nov_y = nocc_y * nvirt_y
B = TEI_MO_iajb_xxyy.copy()
B.shape = (nov_x, nov_y)
return -B
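# --- Illustrative sketch (editor addition, not part of the original module) ---
# A hedged smoke test of the shape contract shared by the functions above:
# with nocc occupied and nvirt virtual orbitals, an (ia|jb)-type integral
# block enters as a 4-index array and the returned matrix is square with
# dimension nocc * nvirt. The random array is illustrative, not a physical
# integral set.
if __name__ == "__main__":
    nocc, nvirt = 2, 3
    rng = np.random.default_rng(0)
    tei_iajb = rng.standard_normal((nocc, nvirt, nocc, nvirt))
    B = form_rpa_b_matrix_mo_singlet_partial(tei_iajb)
    assert B.shape == (nocc * nvirt, nocc * nvirt)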
| 28.859903
| 146
| 0.638266
| 1,010
| 5,974
| 3.541584
| 0.094059
| 0.100643
| 0.05787
| 0.040257
| 0.915851
| 0.905228
| 0.861616
| 0.847079
| 0.831703
| 0.809058
| 0
| 0.016439
| 0.215936
| 5,974
| 206
| 147
| 29
| 0.747225
| 0.250586
| 0
| 0.721311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213115
| 1
| 0.065574
| false
| 0
| 0.016393
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f8529bcaaa6c08f40b06a60cfda9c87ecda4aee
| 40,511
|
py
|
Python
|
SV&MVScode/nn_models.py
|
FrankLicm/Dialogue-Reading-Comprehension
|
de244b3864ef0f4421df7cb931e1d0eb9434f472
|
[
"Apache-2.0"
] | null | null | null |
SV&MVScode/nn_models.py
|
FrankLicm/Dialogue-Reading-Comprehension
|
de244b3864ef0f4421df7cb931e1d0eb9434f472
|
[
"Apache-2.0"
] | null | null | null |
SV&MVScode/nn_models.py
|
FrankLicm/Dialogue-Reading-Comprehension
|
de244b3864ef0f4421df7cb931e1d0eb9434f472
|
[
"Apache-2.0"
] | null | null | null |
from base import BaseModel
from keras.layers import Embedding, Input, GlobalMaxPooling1D, Layer
from keras.layers.merge import Concatenate, Multiply, dot, Dot
from keras.layers.core import Dense, Lambda, Reshape, Dropout
from keras.models import Model
from keras.layers.recurrent import LSTM, GRU
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.optimizers import *
from keras.initializers import truncated_normal, zeros
from keras.layers.convolutional import *
import keras.backend as K
from keras.legacy import interfaces
from keras.optimizers import Optimizer
from keras import regularizers
from keras.layers.core import Reshape, Permute, Dense, Flatten, Lambda
from keras.activations import softmax
import numpy as np
import tensorflow as tf
from bert_loader_custom import load_trained_model_from_checkpoint
from keras_pos_embd import PositionEmbedding
from keras_layer_normalization import LayerNormalization
from keras_transformer import get_encoders
from keras_transformer import get_custom_objects as get_encoder_custom_objects
from keras_bert.layers import (get_inputs, get_embedding, TokenEmbedding, EmbeddingSimilarity, Masked, Extract)
import json
from keras.models import load_model
from keras_transformer import *
from keras_bert.layers import MaskedGlobalMaxPool1D
from keras_transformer.transformer import _wrap_layer
import keras
from keras.utils import multi_gpu_model
class CNN_LSTM_UA_DA_Model(BaseModel):
def __init__(self, name, nb_classes, vocabulary_size, embedding_size, nb_utterance_token,
nb_query_token, nb_utterances, nb_filters_utterance=50, nb_filters_query=50,
learning_rate=0.001, dropout=0.2, nb_hidden_unit=32):
self.nb_classes = nb_classes
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
# number of tokens per utterance
self.nb_utterance_token = nb_utterance_token
# number of tokens per query
self.nb_query_token = nb_query_token
# number of utterances per dialog
self.nb_utterances = nb_utterances
# number of filters in utterance convolution and query convolution
self.nb_filters_utterance = nb_filters_utterance
self.nb_filters_query = nb_filters_query
# hidden unit size of LSTM
self.nb_hidden_unit = nb_hidden_unit
self.learning_rate = learning_rate
self.dropout = dropout
self.embedding_layer_utterance = None
self.embedding_layer_query = None
self.embedding_set = False
model = self.build_model()
super(CNN_LSTM_UA_DA_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def crossatt(self, x):
# cross-attention between the dialog representation and the query
doc, query, doc_mask, q_mask = x[0], x[1], x[2], x[3]
trans_doc = K.permute_dimensions(doc, (0, 2, 1))
# token-level match scores between every query and dialog position
match_score = K.tanh(dot([query, trans_doc], (2, 1)))
# attention over dialog positions and over query tokens
query_to_doc_att = K.softmax(K.sum(match_score, axis=1))
doc_to_query_att = K.softmax(K.sum(match_score, axis=-1))
# mask padded dialog positions, then renormalize to sum to one
alpha = query_to_doc_att * doc_mask
a_sum = K.sum(alpha, axis=1)
_a_sum = K.expand_dims(a_sum, -1)
alpha = alpha / _a_sum
# mask padded query tokens, then renormalize to sum to one
beta = doc_to_query_att * q_mask
b_sum = K.sum(beta, axis=1)
_b_sum = K.expand_dims(b_sum, 1)
beta = beta / _b_sum
# attention-weighted dialog and query vectors, concatenated
doc_vector = dot([trans_doc, alpha], (2, 1))
trans_que = K.permute_dimensions(query, (0, 2, 1))
que_vector = dot([trans_que, beta], (2, 1))
final_hidden = K.concatenate([doc_vector, que_vector])
return final_hidden
def build_model(self):
inputs = []
# utterances
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token,)))
# similarity matrices
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token, self.nb_query_token)))
# query
inputs.append(Input(shape=(self.nb_query_token,)))
# entity mask
inputs.append(Input(shape=(self.nb_classes,)))
# query token mask
inputs.append(Input(shape=(self.nb_query_token,)))
# dialog mask
inputs.append(Input(shape=(self.nb_utterances,)))
# embedding layer for utterances and query
self.embedding_layer_utterance = Embedding(self.vocabulary_size, self.embedding_size)
self.embedding_layer_query = Embedding(self.vocabulary_size, self.embedding_size,
input_length=self.nb_query_token, mask_zero=True)
# utterance level attention matrix
attn = DocAttentionMap((self.nb_utterance_token, self.embedding_size))
# 3-D embedding for utterances
embedding_utterances = []
for i in range(self.nb_utterances):
embedding_utter = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[i]))
doc_att_map = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
attn(inputs[i + self.nb_utterances]))
embedding_utterances.append(Concatenate()([embedding_utter, doc_att_map]))
# convolution embedding input for query
conv_embedding_query = Reshape((self.nb_query_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[-4]))
# LSTM embedding input for query
embedding_query = self.embedding_layer_query(inputs[-4])
# convolution output for query
conv_q = Reshape((self.nb_query_token, self.nb_filters_query))(
Convolution2D(self.nb_filters_query, (1, self.embedding_size), activation='relu')(conv_embedding_query))
# utterance embeddings
scene = []
for i in range(self.nb_utterances):
utter = []
for j in range(2, 6):
conv_u = Convolution2D(self.nb_filters_utterance, (j, self.embedding_size), activation='relu')(
embedding_utterances[i])
pool_u = Reshape((self.nb_filters_utterance,))(
MaxPooling2D(pool_size=(self.nb_utterance_token - j + 1, 1))(conv_u))
utter.append(pool_u)
scene.append(Reshape((self.nb_filters_utterance * 4, 1))(Concatenate()(utter)))
# dialog matrix
scene = Permute((2, 1))(Concatenate()(scene))
# convolution output of dialog matrix
reshape_scene = Reshape((self.nb_utterances, self.nb_filters_utterance * 4, 1))(scene)
single = Convolution2D(self.nb_filters_utterance, (1, self.nb_filters_utterance * 4), activation='relu')(
reshape_scene)
single = Reshape((self.nb_utterances, self.nb_filters_utterance))(single)
# context embedding for both dialog and query
d_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
q_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
bi_d_rnn = Bidirectional(d_rnn_layer, merge_mode='concat')(scene)
bi_q_rnn = Bidirectional(q_rnn_layer, merge_mode='concat')(embedding_query)
# dialog level attention vector
att_vector = Lambda(self.crossatt, output_shape=(self.nb_filters_utterance * 2,))(
[single, conv_q, inputs[-1], inputs[-2]])
merged_vectors = Concatenate()([bi_d_rnn, bi_q_rnn, att_vector])
classes = Dense(units=self.nb_classes, activation='softmax')(merged_vectors)
# masking
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))([classes, inputs[-3]])
model = Model(inputs=inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def load_embedding(self, embedding):
if self.model is None:
raise Exception('model has not been built')
self.embedding_layer_query.set_weights(embedding)
self.embedding_layer_utterance.set_weights(embedding)
self.embedding_set = True
def fit(self, x, y, *args, **kwargs):
if self.embedding_set is False:
raise Exception('embedding has not been set')
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
class CNN_LSTM_UA_Model(BaseModel):
def __init__(self, name, nb_classes, vocabulary_size, embedding_size, nb_utterance_token,
nb_query_token, nb_utterances, nb_filters_utterance=50, nb_filters_query=50,
learning_rate=0.001, dropout=0.2, nb_hidden_unit=32):
self.nb_classes = nb_classes
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
# number of tokens per utterance
self.nb_utterance_token = nb_utterance_token
# number of tokens per query
self.nb_query_token = nb_query_token
# number of utterances per dialog
self.nb_utterances = nb_utterances
# number of filters in utterance convolution and query convolution
self.nb_filters_utterance = nb_filters_utterance
self.nb_filters_query = nb_filters_query
# hidden unit size of LSTM
self.nb_hidden_unit = nb_hidden_unit
self.learning_rate = learning_rate
self.dropout = dropout
self.embedding_layer_utterance = None
self.embedding_layer_query = None
self.embedding_set = False
model = self.build_model()
super(CNN_LSTM_UA_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def build_model(self):
inputs = []
# utterances
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token,)))
# similarity matrices
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token, self.nb_query_token)))
# query
inputs.append(Input(shape=(self.nb_query_token,)))
# entity mask
inputs.append(Input(shape=(self.nb_classes,)))
# query token mask
inputs.append(Input(shape=(self.nb_query_token,)))
# dialog mask
inputs.append(Input(shape=(self.nb_utterances,)))
# embedding layer for utterances and query
self.embedding_layer_utterance = Embedding(self.vocabulary_size, self.embedding_size)
self.embedding_layer_query = Embedding(self.vocabulary_size, self.embedding_size,
input_length=self.nb_query_token, mask_zero=True)
# utterance level attention matrix
attn = DocAttentionMap((self.nb_utterance_token, self.embedding_size))
# 3-D embedding for utterances
embedding_utterances = []
for i in range(self.nb_utterances):
embedding_utter = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[i]))
doc_att_map = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
attn(inputs[i + self.nb_utterances]))
embedding_utterances.append(Concatenate()([embedding_utter, doc_att_map]))
# LSTM embedding input for query
embedding_query = self.embedding_layer_query(inputs[-4])
# utterance embeddings
scene = []
for i in range(self.nb_utterances):
utter = []
for j in range(2, 6):
conv_u = Convolution2D(self.nb_filters_utterance, (j, self.embedding_size), activation='relu')(
embedding_utterances[i])
pool_u = Reshape((self.nb_filters_utterance,))(
MaxPooling2D(pool_size=(self.nb_utterance_token - j + 1, 1))(conv_u))
utter.append(pool_u)
scene.append(Reshape((self.nb_filters_utterance * 4, 1))(Concatenate()(utter)))
# dialog matrix
scene = Permute((2, 1))(Concatenate()(scene))
# context embedding for both dialog and query
d_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
q_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
bi_d_rnn = Bidirectional(d_rnn_layer, merge_mode='concat')(scene)
bi_q_rnn = Bidirectional(q_rnn_layer, merge_mode='concat')(embedding_query)
merged_vectors = Concatenate()([bi_d_rnn, bi_q_rnn])
classes = Dense(units=self.nb_classes, activation='softmax')(merged_vectors)
# masking
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))([classes, inputs[-3]])
model = Model(inputs=inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def load_embedding(self, embedding):
if self.model is None:
raise Exception('model has not been built')
self.embedding_layer_query.set_weights(embedding)
self.embedding_layer_utterance.set_weights(embedding)
self.embedding_set = True
def fit(self, x, y, *args, **kwargs):
if self.embedding_set is False:
raise Exception('embedding has not been set')
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
class CNN_LSTM_DA_Model(BaseModel):
def __init__(self, name, nb_classes, vocabulary_size, embedding_size, nb_utterance_token,
nb_query_token, nb_utterances, nb_filters_utterance=50, nb_filters_query=50,
learning_rate=0.001, dropout=0.2, nb_hidden_unit=32):
self.nb_classes = nb_classes
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
# number of tokens per utterance
self.nb_utterance_token = nb_utterance_token
# number of tokens per query
self.nb_query_token = nb_query_token
# number of utterances per dialog
self.nb_utterances = nb_utterances
# number of filters in utterance convolution and query convolution
self.nb_filters_utterance = nb_filters_utterance
self.nb_filters_query = nb_filters_query
# hidden unit size of LSTM
self.nb_hidden_unit = nb_hidden_unit
self.learning_rate = learning_rate
self.dropout = dropout
self.embedding_layer_utterance = None
self.embedding_layer_query = None
self.embedding_set = False
model = self.build_model()
super(CNN_LSTM_DA_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def crossatt(self, x):
doc, query, doc_mask, q_mask = x[0], x[1], x[2], x[3]
trans_doc = K.permute_dimensions(doc, (0, 2, 1))
match_score = K.tanh(dot([query, trans_doc], (2, 1)))
query_to_doc_att = K.softmax(K.sum(match_score, axis=1))
doc_to_query_att = K.softmax(K.sum(match_score, axis=-1))
alpha = query_to_doc_att * doc_mask
a_sum = K.sum(alpha, axis=1)
_a_sum = K.expand_dims(a_sum, -1)
alpha = alpha / _a_sum
beta = doc_to_query_att * q_mask
b_sum = K.sum(beta, axis=1)
_b_sum = K.expand_dims(b_sum, 1)
beta = beta / _b_sum
doc_vector = dot([trans_doc, alpha], (2, 1))
trans_que = K.permute_dimensions(query, (0, 2, 1))
que_vector = dot([trans_que, beta], (2, 1))
final_hidden = K.concatenate([doc_vector, que_vector])
return final_hidden
def build_model(self):
inputs = []
# utterances
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token,)))
# query
inputs.append(Input(shape=(self.nb_query_token,)))
# entity mask
inputs.append(Input(shape=(self.nb_classes,)))
# query token mask
inputs.append(Input(shape=(self.nb_query_token,)))
# dialog mask
inputs.append(Input(shape=(self.nb_utterances,)))
# embedding layer for utterances and query
self.embedding_layer_utterance = Embedding(self.vocabulary_size, self.embedding_size)
self.embedding_layer_query = Embedding(self.vocabulary_size, self.embedding_size,
input_length=self.nb_query_token, mask_zero=True)
# 2-D embedding for utterances
embedding_utterances = []
for i in range(self.nb_utterances):
embedding_utter = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[i]))
embedding_utterances.append(embedding_utter)
# convolution embedding input for query
conv_embedding_query = Reshape((self.nb_query_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[-4]))
# LSTM embedding input for query
embedding_query = self.embedding_layer_query(inputs[-4])
# convolution output for query
conv_q = Reshape((self.nb_query_token, self.nb_filters_query))(
Convolution2D(self.nb_filters_query, (1, self.embedding_size), activation='relu')(conv_embedding_query))
# utterance embeddings
scene = []
for i in range(self.nb_utterances):
utter = []
for j in range(2, 6):
conv_u = Convolution2D(self.nb_filters_utterance, (j, self.embedding_size), activation='relu')(
embedding_utterances[i])
pool_u = Reshape((self.nb_filters_utterance,))(
MaxPooling2D(pool_size=(self.nb_utterance_token - j + 1, 1))(conv_u))
utter.append(pool_u)
scene.append(Reshape((self.nb_filters_utterance * 4, 1))(Concatenate()(utter)))
# dialog matrix
scene = Permute((2, 1))(Concatenate()(scene))
# convolution output of dialog matrix
reshape_scene = Reshape((self.nb_utterances, self.nb_filters_utterance * 4, 1))(scene)
single = Convolution2D(self.nb_filters_utterance, (1, self.nb_filters_utterance * 4), activation='relu')(
reshape_scene)
single = Reshape((self.nb_utterances, self.nb_filters_utterance))(single)
# context embedding for both dialog and query
d_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
q_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
bi_d_rnn = Bidirectional(d_rnn_layer, merge_mode='concat')(scene)
bi_q_rnn = Bidirectional(q_rnn_layer, merge_mode='concat')(embedding_query)
# dialog level attention vector
att_vector = Lambda(self.crossatt, output_shape=(self.nb_filters_utterance * 2,))(
[single, conv_q, inputs[-1], inputs[-2]])
merged_vectors = Concatenate()([bi_d_rnn, bi_q_rnn, att_vector])
classes = Dense(units=self.nb_classes, activation='softmax')(merged_vectors)
# masking
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))([classes, inputs[-3]])
model = Model(inputs=inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def load_embedding(self, embedding):
if self.model is None:
raise Exception('model has not been built')
self.embedding_layer_query.set_weights(embedding)
self.embedding_layer_utterance.set_weights(embedding)
self.embedding_set = True
def fit(self, x, y, *args, **kwargs):
if self.embedding_set is False:
raise Exception('embedding has not been set')
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
class CNN_LSTM_Model(BaseModel):
def __init__(self, name, nb_classes, vocabulary_size, embedding_size, nb_utterance_token,
nb_query_token, nb_utterances, nb_filters_utterance=50, nb_filters_query=50,
learning_rate=0.001, dropout=0.2, nb_hidden_unit=32):
self.nb_classes = nb_classes
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
# number of tokens per utterance
self.nb_utterance_token = nb_utterance_token
# number of tokens per query
self.nb_query_token = nb_query_token
# number of utterances per dialog
self.nb_utterances = nb_utterances
# number of filters in utterance convolution and query convolution
self.nb_filters_utterance = nb_filters_utterance
self.nb_filters_query = nb_filters_query
# hidden unit size of LSTM
self.nb_hidden_unit = nb_hidden_unit
self.learning_rate = learning_rate
self.dropout = dropout
self.embedding_layer_utterance = None
self.embedding_layer_query = None
self.embedding_set = False
model = self.build_model()
super(CNN_LSTM_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def build_model(self):
inputs = []
# utterances
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token,)))
# query
inputs.append(Input(shape=(self.nb_query_token,)))
# entity mask
inputs.append(Input(shape=(self.nb_classes,)))
# query token mask
inputs.append(Input(shape=(self.nb_query_token,)))
# dialog mask
inputs.append(Input(shape=(self.nb_utterances,)))
# embedding layer for utterances and query
self.embedding_layer_utterance = Embedding(self.vocabulary_size, self.embedding_size)
self.embedding_layer_query = Embedding(self.vocabulary_size, self.embedding_size,
input_length=self.nb_query_token, mask_zero=True)
# 2-D embedding for utterances
embedding_utterances = []
for i in range(self.nb_utterances):
embedding_utter = Reshape((self.nb_utterance_token, self.embedding_size, 1))(
self.embedding_layer_utterance(inputs[i]))
embedding_utterances.append(embedding_utter)
# LSTM embedding input for query
embedding_query = self.embedding_layer_query(inputs[-4])
# utterance embeddings
scene = []
for i in range(self.nb_utterances):
utter = []
for j in range(2, 6):
conv_u = Convolution2D(self.nb_filters_utterance, (j, self.embedding_size), activation='relu')(
embedding_utterances[i])
pool_u = Reshape((self.nb_filters_utterance,))(
MaxPooling2D(pool_size=(self.nb_utterance_token - j + 1, 1))(conv_u))
utter.append(pool_u)
scene.append(Reshape((self.nb_filters_utterance * 4, 1))(Concatenate()(utter)))
# dialog matrix
scene = Permute((2, 1))(Concatenate()(scene))
# context embedding for both dialog and query
d_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
q_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
bi_d_rnn = Bidirectional(d_rnn_layer, merge_mode='concat')(scene)
bi_q_rnn = Bidirectional(q_rnn_layer, merge_mode='concat')(embedding_query)
merged_vectors = Concatenate()([bi_d_rnn, bi_q_rnn])
classes = Dense(units=self.nb_classes, activation='softmax')(merged_vectors)
# masking
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))([classes, inputs[-3]])
model = Model(inputs=inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def load_embedding(self, embedding):
if self.model is None:
raise Exception('model has not been built')
self.embedding_layer_query.set_weights(embedding)
self.embedding_layer_utterance.set_weights(embedding)
self.embedding_set = True
def fit(self, x, y, *args, **kwargs):
if self.embedding_set is False:
raise Exception('embedding has not been set')
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
class LSTM_Model(BaseModel):
def __init__(self, name, nb_classes, vocabulary_size, embedding_size, nb_utterance_token,
nb_query_token, nb_utterances, nb_filters_utterance=50, nb_filters_query=50,
learning_rate=0.001, dropout=0.2, nb_hidden_unit=32):
self.nb_classes = nb_classes
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
# number of tokens per utterance
self.nb_utterance_token = nb_utterance_token
# number of tokens per query
self.nb_query_token = nb_query_token
# number of utterances per dialog
self.nb_utterances = nb_utterances
# number of filters in utterance convolution and query convolution
self.nb_filters_utterance = nb_filters_utterance
self.nb_filters_query = nb_filters_query
# hidden unit size of LSTM
self.nb_hidden_unit = nb_hidden_unit
self.learning_rate = learning_rate
self.dropout = dropout
self.embedding_layer_utterance = None
self.embedding_layer_query = None
self.embedding_set = False
model = self.build_model()
super(LSTM_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def build_model(self):
inputs = []
# utterances
for i in range(self.nb_utterances):
inputs.append(Input(shape=(self.nb_utterance_token,)))
# query
inputs.append(Input(shape=(self.nb_query_token,)))
# entity mask
inputs.append(Input(shape=(self.nb_classes,)))
# query token mask
inputs.append(Input(shape=(self.nb_query_token,)))
# dialog mask
inputs.append(Input(shape=(self.nb_utterances,)))
# embedding layer for utterances and query
self.embedding_layer_utterance = Embedding(self.vocabulary_size, self.embedding_size)
self.embedding_layer_query = Embedding(self.vocabulary_size, self.embedding_size,
input_length=self.nb_query_token, mask_zero=True)
# 2-D embedding for utterances
embedding_utterances = []
for i in range(self.nb_utterances):
embedding_utter = (self.embedding_layer_utterance(inputs[i]))
embedding_utterances.append(embedding_utter)
# LSTM embedding input for query
embedding_query = self.embedding_layer_query(inputs[-4])
# utterance embeddings
scene = []
for i in range(self.nb_utterances):
utter = []
for j in range(2, 6):
utter.append(embedding_utterances[i])
scene.append((Concatenate()(utter)))
# dialog matrix
scene = (Concatenate()(scene))
# context embedding for both dialog and query
d_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
q_rnn_layer = LSTM(self.nb_hidden_unit, activation='tanh', dropout=self.dropout)
bi_d_rnn = Bidirectional(d_rnn_layer, merge_mode='concat')(scene)
bi_q_rnn = Bidirectional(q_rnn_layer, merge_mode='concat')(embedding_query)
merged_vectors = Concatenate()([bi_d_rnn, bi_q_rnn])
classes = Dense(units=self.nb_classes, activation='softmax')(merged_vectors)
# masking
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))([classes, inputs[-3]])
model = Model(inputs=inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def load_embedding(self, embedding):
if self.model is None:
raise Exception('model has not been built')
self.embedding_layer_query.set_weights(embedding)
self.embedding_layer_utterance.set_weights(embedding)
self.embedding_set = True
def fit(self, x, y, *args, **kwargs):
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
def gelu(x):
return 0.5 * x * (1.0 + tf.erf(x / tf.sqrt(2.0)))
class Bert_Model(BaseModel):
def __init__(self, name, nb_classes, config_path, model_path, learning_rate=2e-5, drop_out=0.1, training=False):
self.nb_classes = nb_classes
self.learning_rate = learning_rate
self.config_path = config_path
self.model_path = model_path
self.drop_out = drop_out
model = self.load_trained_model_from_checkpoint(self.config_path, self.model_path, training=training)
print("build model success")
super(Bert_Model, self).__init__(name, model)
def masking_lambda(self, x):
# masking out probabilities of entities that don't appear
m_classes, m_masks = x[0], x[1]
masked_classes = m_classes * m_masks
masked_sum_ = K.sum(masked_classes, axis=1)
masked_sum_ = K.expand_dims(masked_sum_, -1)
masked_sum = K.repeat_elements(masked_sum_, self.nb_classes, axis=1)
masked_classes = masked_classes / masked_sum
masked_classes = K.clip(masked_classes, 1e-7, 1.0 - 1e-7)
return masked_classes
def fit(self, x, y, *args, **kwargs):
hist = self.model.fit(x, y, *args, **kwargs)
return hist
def predict_classes(self, x, y_masks):
predictions = self.model.predict(x)
predictions_masked = predictions * y_masks
classes = [np.argmax(i) for i in predictions_masked]
return classes
def get_model(self,
token_num,
pos_num=512,
seq_len=512,
embed_dim=768,
transformer_num=12,
head_num=12,
feed_forward_dim=3072,
dropout_rate=0.0,
attention_activation=None,
feed_forward_activation=gelu,
training=False):
inputs = get_inputs(seq_len=seq_len)
model_inputs = inputs[:2]
model_inputs.append(Input(shape=(seq_len, self.nb_classes,)))
model_inputs.append(Input(shape=(seq_len, embed_dim,)))
model_inputs.append(Input(shape=(self.nb_classes,)))
embed_layer, embed_weights = get_embedding(
inputs,
token_num=token_num,
embed_dim=embed_dim,
pos_num=pos_num,
dropout_rate=dropout_rate,
trainable=training,
)
transformed = embed_layer
transformed = get_encoders(
encoder_num=transformer_num,
input_layer=transformed,
head_num=head_num,
hidden_dim=feed_forward_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=training,
)
first_token_tensor = Lambda(lambda y: K.squeeze(y[:, 0:1, :], axis=1))(transformed)
classes = Dense(units=self.nb_classes,
use_bias=True,
kernel_initializer=truncated_normal(stddev=0.02),
activation='softmax')(first_token_tensor)
classes_normalized = Lambda(self.masking_lambda, output_shape=(self.nb_classes,))(
[classes, model_inputs[-1]])
model = Model(inputs=model_inputs, outputs=classes_normalized)
opt = Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-6, decay=0.01)
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
return model
def checkpoint_loader(self, checkpoint_file):
def _loader(name):
return tf.train.load_variable(checkpoint_file, name)
return _loader
def load_trained_model_from_checkpoint(self, config_file,
checkpoint_file,
training=False,
seq_len=None):
with open(config_file, 'r') as reader:
config = json.loads(reader.read())
if seq_len is None:
seq_len = config['max_position_embeddings']
else:
seq_len = min(seq_len, config['max_position_embeddings'])
model = self.get_model(
token_num=config['vocab_size'],
pos_num=seq_len,
seq_len=seq_len,
embed_dim=config['hidden_size'],
transformer_num=config['num_hidden_layers'],
head_num=config['num_attention_heads'],
feed_forward_dim=config['intermediate_size'],
training=training,
)
return model
def set_bert_weights(self, config_file, checkpoint_file, seq_len=512):
with open(config_file, 'r') as reader:
config = json.loads(reader.read())
loader = self.checkpoint_loader(checkpoint_file)
self.model.get_layer(name='Embedding-Token').set_weights([
loader('bert/embeddings/word_embeddings'),
])
self.model.get_layer(name='Embedding-Position').set_weights([
loader('bert/embeddings/position_embeddings')[:seq_len, :],
])
self.model.get_layer(name='Embedding-Segment').set_weights([
loader('bert/embeddings/token_type_embeddings'),
])
self.model.get_layer(name='Embedding-Norm').set_weights([
loader('bert/embeddings/LayerNorm/gamma'),
loader('bert/embeddings/LayerNorm/beta'),
])
for i in range(config['num_hidden_layers']):
self.model.get_layer(name='Encoder-%d-MultiHeadSelfAttention' % (i + 1)).set_weights([
loader('bert/encoder/layer_%d/attention/self/query/kernel' % i),
loader('bert/encoder/layer_%d/attention/self/query/bias' % i),
loader('bert/encoder/layer_%d/attention/self/key/kernel' % i),
loader('bert/encoder/layer_%d/attention/self/key/bias' % i),
loader('bert/encoder/layer_%d/attention/self/value/kernel' % i),
loader('bert/encoder/layer_%d/attention/self/value/bias' % i),
loader('bert/encoder/layer_%d/attention/output/dense/kernel' % i),
loader('bert/encoder/layer_%d/attention/output/dense/bias' % i),
])
self.model.get_layer(name='Encoder-%d-MultiHeadSelfAttention-Norm' % (i + 1)).set_weights([
loader('bert/encoder/layer_%d/attention/output/LayerNorm/gamma' % i),
loader('bert/encoder/layer_%d/attention/output/LayerNorm/beta' % i),
])
self.model.get_layer(name='Encoder-%d-FeedForward' % (i + 1)).set_weights([
loader('bert/encoder/layer_%d/intermediate/dense/kernel' % i),
loader('bert/encoder/layer_%d/intermediate/dense/bias' % i),
loader('bert/encoder/layer_%d/output/dense/kernel' % i),
loader('bert/encoder/layer_%d/output/dense/bias' % i),
])
self.model.get_layer(name='Encoder-%d-FeedForward-Norm' % (i + 1)).set_weights([
loader('bert/encoder/layer_%d/output/LayerNorm/gamma' % i),
loader('bert/encoder/layer_%d/output/LayerNorm/beta' % i),
])
class DocAttentionMap(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
self.U = None
super(DocAttentionMap, self).__init__(**kwargs)
def get_config(self):
base_config = super(DocAttentionMap, self).get_config()
base_config['output_dim'] = self.output_dim
return base_config
def build(self, input_shape):
# input_shape[-1] is the query-token dimension of the similarity-map input
self.U = self.add_weight(name='kernel',
shape=(input_shape[-1], self.output_dim[1]),
initializer='uniform',
trainable=True)
super(DocAttentionMap, self).build(input_shape)
def call(self, x, **kwargs):
# project the similarity map into the embedding space: xU = tanh(x . U)
xU = K.tanh(K.dot(x, self.U))
return xU
def compute_output_shape(self, input_shape):
return None, self.output_dim[0], self.output_dim[1]
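# --- Illustrative sketch (editor addition, not part of the original module) ---
# A hedged, stand-alone use of DocAttentionMap: it projects a token-level
# utterance/query similarity matrix of shape (nb_utterance_token,
# nb_query_token) to a (nb_utterance_token, embedding_size) attention map via
# a learned (nb_query_token, embedding_size) weight U, as the build_model()
# methods above use it. The sizes 30, 10 and 100 are hypothetical.
if __name__ == '__main__':
    sim = Input(shape=(30, 10))                # similarity matrix input
    att_map = DocAttentionMap((30, 100))(sim)  # output shape (None, 30, 100)
    demo = Model(inputs=sim, outputs=att_map)
    demo.summary()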
| 45.112472
| 116
| 0.651156
| 5,116
| 40,511
| 4.881353
| 0.05864
| 0.039643
| 0.030273
| 0.026429
| 0.875666
| 0.858007
| 0.846474
| 0.834101
| 0.827454
| 0.809354
| 0
| 0.010574
| 0.250648
| 40,511
| 897
| 117
| 45.162765
| 0.81207
| 0.073042
| 0
| 0.75405
| 0
| 0
| 0.05352
| 0.037683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069219
| false
| 0
| 0.045655
| 0.004418
| 0.173785
| 0.001473
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3fdd6e1b94bdc631581b67f09373686d5e6fdbc5
| 5,583
|
py
|
Python
|
tests/test_match_dims_like.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 46
|
2017-01-05T00:21:18.000Z
|
2022-03-05T12:20:39.000Z
|
tests/test_match_dims_like.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 47
|
2017-03-27T13:37:31.000Z
|
2022-02-02T07:14:22.000Z
|
tests/test_match_dims_like.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 11
|
2017-01-27T23:03:34.000Z
|
2020-06-22T20:05:49.000Z
|
import pytest
from sympl import (
DataArray, get_numpy_arrays_with_properties, InvalidStateError)
import numpy as np
def test_match_dims_like_hardcoded_dimensions_matching_lengths():
input_state = {
'air_temperature': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'Pa'},
),
}
input_properties = {
'air_temperature': {
'dims': ['alpha', 'beta', 'gamma'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['alpha', 'beta', 'gamma'],
'units': 'Pa',
},
}
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
def test_match_dims_like_partly_hardcoded_dimensions_matching_lengths():
input_state = {
'air_temperature': DataArray(
np.zeros([2, 3, 4]),
dims=['lat', 'lon', 'mid_levels'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([2, 3, 4]),
dims=['lat', 'lon', 'interface_levels'],
attrs={'units': 'Pa'},
),
}
input_properties = {
'air_temperature': {
'dims': ['*', 'mid_levels'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['*', 'interface_levels'],
'units': 'Pa',
},
}
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
assert np.byte_bounds(input_state['air_temperature'].values) == np.byte_bounds(raw_arrays['air_temperature'])
assert np.byte_bounds(input_state['air_pressure'].values) == np.byte_bounds(raw_arrays['air_pressure'])
def test_match_dims_like_hardcoded_dimensions_non_matching_lengths():
input_state = {
'air_temperature': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([4, 2, 3]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'Pa'},
),
}
input_properties = {
'air_temperature': {
'dims': ['alpha', 'beta', 'gamma'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['alpha', 'beta', 'gamma'],
'units': 'Pa',
},
}
try:
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
except InvalidStateError:
pass
else:
raise AssertionError('should have raised InvalidStateError')
def test_match_dims_like_wildcard_dimensions_matching_lengths():
input_state = {
'air_temperature': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'Pa'},
),
}
input_properties = {
'air_temperature': {
'dims': ['*'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['*'],
'units': 'Pa',
},
}
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
def test_match_dims_like_wildcard_dimensions_non_matching_lengths():
input_state = {
'air_temperature': DataArray(
np.zeros([2, 3, 4]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([1, 2, 3]),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'Pa'},
),
}
input_properties = {
'air_temperature': {
'dims': ['*'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['*'],
'units': 'Pa',
},
}
try:
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
except InvalidStateError:
pass
else:
raise AssertionError('should have raised InvalidStateError')
def test_match_dims_like_wildcard_dimensions_use_same_ordering():
input_state = {
'air_temperature': DataArray(
np.random.randn(2, 3, 4),
dims=['alpha', 'beta', 'gamma'],
attrs={'units': 'degK'},
),
'air_pressure': DataArray(
np.zeros([4, 2, 3]),
dims=['gamma', 'alpha', 'beta'],
attrs={'units': 'Pa'},
),
}
for i in range(4):
input_state['air_pressure'][i, :, :] = input_state['air_temperature'][:, :, i]
input_properties = {
'air_temperature': {
'dims': ['*'],
'units': 'degK',
'match_dims_like': 'air_pressure',
},
'air_pressure': {
'dims': ['*'],
'units': 'Pa',
},
}
raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
assert np.all(raw_arrays['air_temperature'] == raw_arrays['air_pressure'])
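# --- Illustrative sketch (editor addition, not part of the original tests) ---
# A hedged recap of the property contract exercised above: a quantity that
# declares 'match_dims_like' must have wildcard ('*') dimensions whose lengths
# match those of the named quantity, otherwise
# get_numpy_arrays_with_properties raises InvalidStateError.
EXAMPLE_PROPERTIES = {
    'air_temperature': {
        'dims': ['*', 'mid_levels'],
        'units': 'degK',
        'match_dims_like': 'air_pressure',  # tie '*' dims to air_pressure
    },
    'air_pressure': {
        'dims': ['*', 'interface_levels'],
        'units': 'Pa',
    },
}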
if __name__ == '__main__':
pytest.main([__file__])
| 30.016129
| 113
| 0.516568
| 539
| 5,583
| 5.012987
| 0.139147
| 0.089563
| 0.062546
| 0.086603
| 0.873427
| 0.863064
| 0.850111
| 0.786825
| 0.77017
| 0.765729
| 0
| 0.009752
| 0.320437
| 5,583
| 185
| 114
| 30.178378
| 0.702425
| 0
| 0
| 0.719298
| 0
| 0
| 0.211714
| 0
| 0
| 0
| 0
| 0
| 0.02924
| 1
| 0.035088
| false
| 0.011696
| 0.017544
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b74e5310426a8ba315eca7640b7d4aedd341e1d9
| 177
|
py
|
Python
|
dtech_instagram/views/__init__.py
|
hideki-saito/InstagramAPP_Flask
|
c3ee6f10d35edb74f0f82f4370faca8f0c25200c
|
[
"MIT"
] | 1
|
2018-12-03T08:47:47.000Z
|
2018-12-03T08:47:47.000Z
|
dtech_instagram/views/__init__.py
|
hideki-saito/InstagramAPP_Flask
|
c3ee6f10d35edb74f0f82f4370faca8f0c25200c
|
[
"MIT"
] | 1
|
2018-12-12T17:31:31.000Z
|
2018-12-12T17:31:31.000Z
|
dtech_instagram/views/__init__.py
|
hideki-saito/InstagramAPP_Flask
|
c3ee6f10d35edb74f0f82f4370faca8f0c25200c
|
[
"MIT"
] | null | null | null |
import dtech_instagram.views.account
import dtech_instagram.views.index
import dtech_instagram.views.post
import dtech_instagram.views.settings
import dtech_instagram.views.ajax
| 35.4
| 37
| 0.892655
| 25
| 177
| 6.12
| 0.36
| 0.359477
| 0.653595
| 0.816993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 177
| 5
| 38
| 35.4
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b79b68b59d3f75c0f6b9cd4c39b47bde2c910889
| 6,437
|
py
|
Python
|
tests/test_flows.py
|
MattiaVarrone/nnest
|
9e12be0135ba2e7fa186a904bc33480c3b0c655a
|
[
"MIT"
] | 20
|
2019-04-11T13:39:15.000Z
|
2021-11-18T23:39:04.000Z
|
tests/test_flows.py
|
MattiaVarrone/nnest
|
9e12be0135ba2e7fa186a904bc33480c3b0c655a
|
[
"MIT"
] | 3
|
2019-05-02T10:06:04.000Z
|
2020-08-20T02:36:39.000Z
|
tests/test_flows.py
|
MattiaVarrone/nnest
|
9e12be0135ba2e7fa186a904bc33480c3b0c655a
|
[
"MIT"
] | 6
|
2019-05-01T19:22:04.000Z
|
2021-08-02T11:36:08.000Z
|
import torch
from torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform
import numpy as np
from nnest.trainer import Trainer
from nnest.distributions import GeneralisedNormal
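# Maximum allowed round-trip error between the forward and inverse transforms.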
max_forward_backward_diff = 1.0E-5
np.random.seed(0)
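# Each test pushes data through forward, checks shapes, then verifies that inverse recovers the input.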
def test_base_dist():
for dims in [2, 3, 4, 5]:
base_dists = [
TransformedDistribution(Uniform(torch.zeros(dims), torch.ones(dims)), SigmoidTransform().inv),
MultivariateNormal(torch.zeros(dims), torch.eye(dims)),
GeneralisedNormal(torch.zeros(dims), torch.ones(dims), torch.tensor(8.0))
]
for base_dist in base_dists:
t = Trainer(dims, flow='choleksy', base_dist=base_dist)
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
def test_choleksy():
for dims in [2, 3, 4, 5]:
t = Trainer(dims, flow='choleksy')
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
def test_nvp():
for dims in [2, 3, 4, 5]:
t = Trainer(dims, flow='nvp')
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
def test_spline():
for dims in [2, 3, 4, 5]:
t = Trainer(dims, flow='spline')
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
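# Perturbing only the fast block of z must leave the slow outputs unchanged.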
def test_nvp_slow():
for num_slow in [2, 3, 4, 5]:
for num_fast in [2, 3, 4, 5]:
dims = num_slow + num_fast
t = Trainer(dims, num_slow=num_slow, flow='nvp')
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
dz = torch.randn_like(z) * 0.01
dz[:, 0:num_slow] = 0.0
xp, log_det = t.inverse(z + dz)
diff = torch.max((x - xp)[:, :num_slow]).detach().cpu().numpy()
assert np.abs(diff) == 0
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
def test_spline_slow():
for num_slow in [2, 3, 4, 5]:
for num_fast in [2, 3, 4, 5]:
dims = num_slow + num_fast
t = Trainer(dims, num_slow=num_slow, flow='spline')
test_data = np.random.normal(size=(10, dims))
test_data = torch.from_numpy(test_data).float()
z, z_log_det = t.forward(test_data)
assert z.shape == torch.Size([10, dims])
assert z_log_det.shape == torch.Size([10])
x, x_log_det = t.inverse(z)
diff = torch.max(x - test_data).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
diff = torch.max(x_log_det + z_log_det).detach().cpu().numpy()
assert np.abs(diff) <= max_forward_backward_diff
dz = torch.randn_like(z) * 0.01
dz[:, 0:num_slow] = 0.0
xp, log_det = t.inverse(z + dz)
diff = torch.max((x - xp)[:, :num_slow]).detach().cpu().numpy()
assert np.abs(diff) == 0
samples = t.get_synthetic_samples(10)
assert samples.shape == torch.Size([10, dims])
log_probs = t.log_probs(test_data)
assert log_probs.shape == torch.Size([10])
| 44.701389
| 106
| 0.596551
| 934
| 6,437
| 3.891863
| 0.078158
| 0.07923
| 0.092435
| 0.10564
| 0.883356
| 0.874553
| 0.859697
| 0.856121
| 0.856121
| 0.856121
| 0
| 0.026155
| 0.263477
| 6,437
| 143
| 107
| 45.013986
| 0.740561
| 0
| 0
| 0.806202
| 0
| 0
| 0.005282
| 0
| 0
| 0
| 0
| 0
| 0.294574
| 1
| 0.046512
| false
| 0
| 0.03876
| 0
| 0.085271
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7a90d0ca0527de097e3d6294e7ccc9ffb5a8344
| 10,491
|
py
|
Python
|
tests/pytests/functional/modules/state/requisites/test_listen.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 2
|
2015-08-21T01:05:03.000Z
|
2015-09-02T07:30:45.000Z
|
tests/pytests/functional/modules/state/requisites/test_listen.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 9
|
2021-03-31T20:25:25.000Z
|
2021-07-04T05:33:46.000Z
|
tests/pytests/functional/modules/state/requisites/test_listen.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 1
|
2021-11-30T06:51:52.000Z
|
2021-11-30T06:51:52.000Z
|
import pytest
pytestmark = [
pytest.mark.windows_whitelisted,
]
def test_listen_requisite(state, state_tree):
"""
Tests a simple state using the listen requisite
"""
sls_contents = """
successful_changing_state:
cmd.run:
- name: echo "Successful Change"
non_changing_state:
test.succeed_without_changes
test_listening_change_state:
cmd.run:
- name: echo "Listening State"
- listen:
- cmd: successful_changing_state
test_listening_non_changing_state:
cmd.run:
- name: echo "Only run once"
- listen:
- test: non_changing_state
# test that requisite resolution for listen uses ID declaration.
# test_listening_resolution_one and test_listening_resolution_two
# should both run.
test_listening_resolution_one:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- cmd: successful_changing_state
test_listening_resolution_two:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- cmd: successful_changing_state
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
assert listener_state in ret
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
assert absent_state not in ret
def test_listen_in_requisite(state, state_tree):
"""
Tests a simple state using the listen_in requisite
"""
sls_contents = """
successful_changing_state:
cmd.run:
- name: echo "Successful Change"
- listen_in:
- cmd: test_listening_change_state
non_changing_state:
test.succeed_without_changes:
- listen_in:
- cmd: test_listening_non_changing_state
test_listening_change_state:
cmd.run:
- name: echo "Listening State"
test_listening_non_changing_state:
cmd.run:
- name: echo "Only run once"
# test that requisite resolution for listen_in uses ID declaration.
# test_listen_in_resolution should run.
test_listen_in_resolution:
cmd.wait:
- name: echo "Successful listen_in resolution"
successful_changing_state_name_foo:
test.succeed_with_changes:
- name: foo
- listen_in:
- cmd: test_listen_in_resolution
successful_non_changing_state_name_foo:
test.succeed_without_changes:
- name: foo
- listen_in:
- cmd: test_listen_in_resolution
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
assert listener_state in ret
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
assert absent_state not in ret
def test_listen_in_requisite_resolution(state, state_tree):
"""
Verify listen_in requisite lookups use ID declaration to check for changes
"""
sls_contents = """
successful_changing_state:
cmd.run:
- name: echo "Successful Change"
- listen_in:
- cmd: test_listening_change_state
non_changing_state:
test.succeed_without_changes:
- listen_in:
- cmd: test_listening_non_changing_state
test_listening_change_state:
cmd.run:
- name: echo "Listening State"
test_listening_non_changing_state:
cmd.run:
- name: echo "Only run once"
# test that requisite resolution for listen_in uses ID declaration.
# test_listen_in_resolution should run.
test_listen_in_resolution:
cmd.wait:
- name: echo "Successful listen_in resolution"
successful_changing_state_name_foo:
test.succeed_with_changes:
- name: foo
- listen_in:
- cmd: test_listen_in_resolution
successful_non_changing_state_name_foo:
test.succeed_without_changes:
- name: foo
- listen_in:
- cmd: test_listen_in_resolution
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
assert listener_state in ret
def test_listen_requisite_resolution(state, state_tree):
"""
Verify listen requisite lookups use ID declaration to check for changes
"""
sls_contents = """
successful_changing_state:
cmd.run:
- name: echo "Successful Change"
non_changing_state:
test.succeed_without_changes
test_listening_change_state:
cmd.run:
- name: echo "Listening State"
- listen:
- cmd: successful_changing_state
test_listening_non_changing_state:
cmd.run:
- name: echo "Only run once"
- listen:
- test: non_changing_state
# test that requisite resolution for listen uses ID declaration.
# test_listening_resolution_one and test_listening_resolution_two
# should both run.
test_listening_resolution_one:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- cmd: successful_changing_state
test_listening_resolution_two:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- cmd: successful_changing_state
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
assert listener_state in ret
def test_listen_requisite_no_state_module(state, state_tree):
"""
Tests a simple state using the listen requisite
"""
sls_contents = """
successful_changing_state:
cmd.run:
- name: echo "Successful Change"
non_changing_state:
test.succeed_without_changes
test_listening_change_state:
cmd.run:
- name: echo "Listening State"
- listen:
- successful_changing_state
test_listening_non_changing_state:
cmd.run:
- name: echo "Only run once"
- listen:
- non_changing_state
# test that requisite resolution for listen uses ID declaration.
# test_listening_resolution_one and test_listening_resolution_two
# should both run.
test_listening_resolution_one:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- successful_changing_state
test_listening_resolution_two:
cmd.run:
- name: echo "Successful listen resolution"
- listen:
- successful_changing_state
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
assert listener_state in ret
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
assert absent_state not in ret
def test_listen_in_requisite_resolution_names(state, state_tree):
"""
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
"""
sls_contents = """
test:
test.succeed_with_changes:
- name: test
- listen_in:
- test: service
service:
test.succeed_without_changes:
- names:
- nginx
- crond
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
assert "test_|-listener_service_|-nginx_|-mod_watch" in ret
assert "test_|-listener_service_|-crond_|-mod_watch" in ret
def test_listen_requisite_resolution_names(state, state_tree):
"""
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
"""
sls_contents = """
test:
test.succeed_with_changes:
- name: test
service:
test.succeed_without_changes:
- names:
- nginx
- crond
- listen:
- test: test
"""
with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
ret = state.sls("requisite")
assert "test_|-listener_service_|-nginx_|-mod_watch" in ret
assert "test_|-listener_service_|-crond_|-mod_watch" in ret
def test_onlyif_req(state, subtests):
onlyif = [{}]
state_id = "test_|-onlyif test_|-onlyif test_|-succeed_with_changes"
with subtests.test(onlyif=onlyif):
ret = state.single(
name="onlyif test", fun="test.succeed_with_changes", onlyif=onlyif
)
assert ret[state_id]["result"] is True
assert ret[state_id]["comment"] == "Success!"
onlyif = [{"fun": "test.true"}]
state_id = "test_|-onlyif test_|-onlyif test_|-succeed_without_changes"
with subtests.test(onlyif=onlyif):
ret = state.single(
name="onlyif test", fun="test.succeed_without_changes", onlyif=onlyif
)
assert ret[state_id]["result"] is True
assert not ret[state_id]["changes"]
assert ret[state_id]["comment"] == "Success!"
onlyif = [{"fun": "test.false"}]
state_id = "test_|-onlyif test_|-onlyif test_|-fail_with_changes"
with subtests.test(onlyif=onlyif):
ret = state.single(
name="onlyif test", fun="test.fail_with_changes", onlyif=onlyif
)
assert ret[state_id]["result"] is True
assert not ret[state_id]["changes"]
assert ret[state_id]["comment"] == "onlyif condition is false"
onlyif = [{"fun": "test.true"}]
state_id = "test_|-onlyif test_|-onlyif test_|-fail_with_changes"
with subtests.test(onlyif=onlyif):
ret = state.single(
name="onlyif test", fun="test.fail_with_changes", onlyif=onlyif
)
assert ret[state_id]["result"] is False
assert ret[state_id]["changes"]
assert ret[state_id]["comment"] == "Failure!"
| 31.316418
| 121
| 0.661996
| 1,237
| 10,491
| 5.271625
| 0.069523
| 0.071768
| 0.032204
| 0.045085
| 0.961816
| 0.958442
| 0.958442
| 0.955988
| 0.930225
| 0.917344
| 0
| 0
| 0.25031
| 10,491
| 334
| 122
| 31.41018
| 0.829116
| 0.049566
| 0
| 0.880309
| 0
| 0
| 0.686517
| 0.271989
| 0
| 0
| 0
| 0
| 0.088803
| 1
| 0.030888
| false
| 0
| 0.003861
| 0
| 0.034749
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b7f81ce82fdefb53e2b65c99011b788e5ed36594
| 300
|
py
|
Python
|
l2tscaffolder/scaffolders/__init__.py
|
wajihyassine/l2tscaffolder
|
37c527a7137aead979b82653a3dc549a145e19ce
|
[
"Apache-2.0"
] | 2
|
2018-03-30T16:52:11.000Z
|
2018-11-07T12:11:16.000Z
|
l2tscaffolder/scaffolders/__init__.py
|
wajihyassine/l2tscaffolder
|
37c527a7137aead979b82653a3dc549a145e19ce
|
[
"Apache-2.0"
] | 37
|
2017-11-25T10:48:56.000Z
|
2018-11-21T15:58:41.000Z
|
l2tscaffolder/scaffolders/__init__.py
|
wajihyassine/l2tscaffolder
|
37c527a7137aead979b82653a3dc549a145e19ce
|
[
"Apache-2.0"
] | 5
|
2017-12-22T05:24:36.000Z
|
2018-11-20T21:01:29.000Z
|
# -*- coding: utf-8 -*-
"""This file imports Python modules that registers scaffolders."""
from l2tscaffolder.scaffolders import plaso_sqlite
from l2tscaffolder.scaffolders import timesketch_index
from l2tscaffolder.scaffolders import timesketch_sketch
from l2tscaffolder.scaffolders import turbinia
| 42.857143
| 66
| 0.836667
| 34
| 300
| 7.294118
| 0.588235
| 0.274194
| 0.451613
| 0.548387
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01845
| 0.096667
| 300
| 6
| 67
| 50
| 0.896679
| 0.276667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4d1c772577a184592e5a92ebd4c911b59554e8cd
| 2,458
|
py
|
Python
|
src/neural_toolbox/rnn.py
|
ash567/guess_what
|
092653695cb2a14cbb12df619b894d983c065a67
|
[
"Apache-2.0"
] | null | null | null |
src/neural_toolbox/rnn.py
|
ash567/guess_what
|
092653695cb2a14cbb12df619b894d983c065a67
|
[
"Apache-2.0"
] | null | null | null |
src/neural_toolbox/rnn.py
|
ash567/guess_what
|
092653695cb2a14cbb12df619b894d983c065a67
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
# For some reason, this manual stacking is faster than MultiRNNCell on tf
def variable_length_LSTM(inp, num_hidden, seq_length,
dropout_keep_prob=1.0, scope="lstm", depth=1,
layer_norm=False, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
states = []
last_states = []
rnn_states = inp
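        # Stack LSTM layers: each layer consumes the previous layer's output sequence.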
for d in range(depth):
with tf.variable_scope('lstmcell'+str(d)):
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_hidden,
layer_norm=layer_norm,
dropout_keep_prob=dropout_keep_prob,
reuse=reuse)
rnn_states, rnn_last_states = tf.nn.dynamic_rnn(
cell,
rnn_states,
dtype=tf.float32,
sequence_length=seq_length,
)
states.append(rnn_states)
last_states.append(rnn_last_states.h)
# print rnn_states
states = tf.concat(states, axis=2)
last_states = tf.concat(last_states, axis=1)
return last_states, states
# Assumes that states and last_states are tensors, not lists (i.e. the output of a single LSTM)
def variable_length_LSTM_extension(states, last_states, num_hidden, seq_length,
dropout_keep_prob=1.0, scope="lstm", depth=1,
layer_norm=False, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
rnn_states = states
states = [states]
last_states = [last_states]
for d in range(depth):
with tf.variable_scope('lstmcell'+str(d)):
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_hidden,
layer_norm=layer_norm,
dropout_keep_prob=dropout_keep_prob,
reuse=reuse)
rnn_states, rnn_last_states = tf.nn.dynamic_rnn(
cell,
rnn_states,
dtype=tf.float32,
sequence_length=seq_length,
)
states.append(rnn_states)
                # rnn_last_states is an LSTMStateTuple; .h is its hidden (output) state
last_states.append(rnn_last_states.h)
# print rnn_states
states = tf.concat(states, axis=2)
last_states = tf.concat(last_states, axis=1)
return last_states, states
| 35.623188
| 79
| 0.542311
| 275
| 2,458
| 4.592727
| 0.258182
| 0.1346
| 0.071259
| 0.060174
| 0.777514
| 0.777514
| 0.777514
| 0.777514
| 0.777514
| 0.777514
| 0
| 0.009211
| 0.381611
| 2,458
| 68
| 80
| 36.147059
| 0.821711
| 0.074451
| 0
| 0.784314
| 0
| 0
| 0.010577
| 0
| 0
| 0
| 0
| 0.014706
| 0
| 1
| 0.039216
| false
| 0
| 0.019608
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d6076f2c086b18b6a8ab72230f6f0e76b3446dd
| 220
|
py
|
Python
|
acnportal/acnsim/events/__init__.py
|
zach401/acnportal
|
3c76892d78ae7cbdca9017f8e2a4e3114198deba
|
[
"BSD-3-Clause"
] | 32
|
2019-05-01T03:55:12.000Z
|
2022-03-27T06:37:04.000Z
|
acnportal/acnsim/events/__init__.py
|
zach401/acnportal
|
3c76892d78ae7cbdca9017f8e2a4e3114198deba
|
[
"BSD-3-Clause"
] | 66
|
2019-08-16T22:13:21.000Z
|
2022-03-10T13:50:29.000Z
|
acnportal/acnsim/events/__init__.py
|
zach401/acnportal
|
3c76892d78ae7cbdca9017f8e2a4e3114198deba
|
[
"BSD-3-Clause"
] | 21
|
2019-06-04T04:29:56.000Z
|
2022-03-27T06:36:46.000Z
|
from .event import Event
from .event import PluginEvent
from .event import UnplugEvent
from .event import RecomputeEvent
from .event_queue import EventQueue
from .acndata_events import *
from .stochastic_events import *
| 27.5
| 35
| 0.831818
| 29
| 220
| 6.206897
| 0.37931
| 0.25
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 220
| 7
| 36
| 31.428571
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4d69f903ab028be607fbe7c2dfeec62f344aed18
| 3,832
|
py
|
Python
|
tests/system/test_connection_c4500x.py
|
kstaniek/condoor
|
77c054b29d4e286c1d7aca2c74dff86b805e1fae
|
[
"Apache-2.0"
] | 7
|
2016-01-20T09:04:09.000Z
|
2020-02-25T07:14:38.000Z
|
tests/system/test_connection_c4500x.py
|
kstaniek/condoor
|
77c054b29d4e286c1d7aca2c74dff86b805e1fae
|
[
"Apache-2.0"
] | 55
|
2015-12-16T14:50:59.000Z
|
2018-04-23T15:27:15.000Z
|
tests/system/test_connection_c4500x.py
|
kstaniek/condoor
|
77c054b29d4e286c1d7aca2c74dff86b805e1fae
|
[
"Apache-2.0"
] | 19
|
2016-04-22T06:09:32.000Z
|
2022-02-25T20:21:51.000Z
|
from tests.system.common import CondoorTestCase, StopTelnetSrv, StartTelnetSrv
from tests.dmock.dmock import C4500XHandler
from tests.utils import remove_cache_file
import condoor
class TestC4500XConnection(CondoorTestCase):
@StartTelnetSrv(C4500XHandler, 10024)
def setUp(self):
CondoorTestCase.setUp(self)
@StopTelnetSrv()
def tearDown(self):
pass
def test_C4500X_1_discovery(self):
remove_cache_file()
urls = ["telnet://admin:admin@127.0.0.1:10024"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
self.conn = conn
conn.connect(self.logfile_condoor)
self.assertEqual(conn.is_discovered, True, "Not discovered properly")
self.assertEqual(conn.hostname, "Switch", "Wrong Hostname: {}".format(conn.hostname))
self.assertEqual(conn.family, "C4500-X", "Wrong Family: {}".format(conn.family))
self.assertEqual(conn.platform, "WS-C4500X-32", "Wrong Platform: {}".format(conn.platform))
self.assertEqual(conn.os_type, "XE", "Wrong OS Type: {}".format(conn.os_type))
self.assertEqual(conn.os_version, "03.08.03.E", "Wrong Version: {}".format(conn.os_version))
self.assertEqual(conn.udi['name'], "Switch System", "Wrong Name: {}".format(conn.udi['name']))
self.assertEqual(conn.udi['description'], "Cisco Systems, Inc. WS-C4500X-32 2 slot switch",
"Wrong Description: {}".format(conn.udi['description']))
self.assertEqual(conn.udi['pid'], "", "Wrong PID: {}".format(conn.udi['pid']))
self.assertEqual(conn.udi['vid'], "", "Wrong VID: {}".format(conn.udi['vid']))
self.assertEqual(conn.udi['sn'], "JAE154100BA", "Wrong S/N: {}".format(conn.udi['sn']))
self.assertEqual(conn.prompt, "Switch#", "Wrong Prompt: {}".format(conn.prompt))
self.assertEqual(conn.is_console, False, "Console connection not detected")
with self.assertRaises(condoor.CommandSyntaxError):
conn.send("wrongcommand")
conn.disconnect()
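    # Same checks as above, but without clearing the cache file first.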
def test_C4500X_1_rediscovery(self):
urls = ["telnet://admin:admin@127.0.0.1:10024"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
self.conn = conn
conn.connect(self.logfile_condoor)
self.assertEqual(conn.is_discovered, True, "Not discovered properly")
self.assertEqual(conn.hostname, "Switch", "Wrong Hostname: {}".format(conn.hostname))
self.assertEqual(conn.family, "C4500-X", "Wrong Family: {}".format(conn.family))
self.assertEqual(conn.platform, "WS-C4500X-32", "Wrong Platform: {}".format(conn.platform))
self.assertEqual(conn.os_type, "XE", "Wrong OS Type: {}".format(conn.os_type))
self.assertEqual(conn.os_version, "03.08.03.E", "Wrong Version: {}".format(conn.os_version))
self.assertEqual(conn.udi['name'], "Switch System", "Wrong Name: {}".format(conn.udi['name']))
self.assertEqual(conn.udi['description'], "Cisco Systems, Inc. WS-C4500X-32 2 slot switch",
"Wrong Description: {}".format(conn.udi['description']))
self.assertEqual(conn.udi['pid'], "", "Wrong PID: {}".format(conn.udi['pid']))
self.assertEqual(conn.udi['vid'], "", "Wrong VID: {}".format(conn.udi['vid']))
self.assertEqual(conn.udi['sn'], "JAE154100BA", "Wrong S/N: {}".format(conn.udi['sn']))
self.assertEqual(conn.prompt, "Switch#", "Wrong Prompt: {}".format(conn.prompt))
self.assertEqual(conn.is_console, False, "Console connection not detected")
with self.assertRaises(condoor.CommandSyntaxError):
conn.send("wrongcommand")
conn.disconnect()
if __name__ == '__main__':
from unittest import main
main()
| 50.421053
| 103
| 0.656576
| 463
| 3,832
| 5.347732
| 0.196544
| 0.157512
| 0.199515
| 0.088853
| 0.837641
| 0.837641
| 0.837641
| 0.837641
| 0.837641
| 0.837641
| 0
| 0.033871
| 0.175626
| 3,832
| 75
| 104
| 51.093333
| 0.749921
| 0
| 0
| 0.711864
| 0
| 0
| 0.232777
| 0.018789
| 0
| 0
| 0
| 0
| 0.474576
| 1
| 0.067797
| false
| 0.016949
| 0.084746
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4d82045fc1126bb75f73ceda11f60b1644a02ee0
| 871
|
py
|
Python
|
sampleData.py
|
alisonsyxu/MEMSI
|
89ff153af4ff9cb69d562230c77f1daabbee6fe0
|
[
"MIT"
] | null | null | null |
sampleData.py
|
alisonsyxu/MEMSI
|
89ff153af4ff9cb69d562230c77f1daabbee6fe0
|
[
"MIT"
] | null | null | null |
sampleData.py
|
alisonsyxu/MEMSI
|
89ff153af4ff9cb69d562230c77f1daabbee6fe0
|
[
"MIT"
] | null | null | null |
import random
def getSampleData():
    # Build one series of 8 random (x, y) points per data set.
    def random_values():
        return [{"x": i, "y": random.randint(10, 100)} for i in range(1, 9)]
    testdata = {
        "data": {
            "dataSets": [{
                "values": random_values(),
                "label": 'A',
            }, {
                "values": random_values(),
                "label": 'B',
            }],
        }
    }
    return testdata
| 54.4375
| 328
| 0.495982
| 129
| 871
| 3.348837
| 0.20155
| 0.259259
| 0.518519
| 0.592593
| 0.828704
| 0.828704
| 0.828704
| 0.828704
| 0.828704
| 0.828704
| 0
| 0.137143
| 0.196326
| 871
| 16
| 329
| 54.4375
| 0.48
| 0
| 0
| 0.133333
| 0
| 0
| 0.077982
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4daec7b161a9fa78d6c56825e525bb1c8bca0421
| 16,377
|
py
|
Python
|
example_problems/tutorial/triangle/services/eval_number_of_triangles_in_triangle_server.py
|
romeorizzi/TALight
|
2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e
|
[
"MIT"
] | 4
|
2021-06-27T13:27:24.000Z
|
2022-03-24T10:46:28.000Z
|
example_problems/tutorial/triangle/services/eval_number_of_triangles_in_triangle_server.py
|
romeorizzi/TALight
|
2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e
|
[
"MIT"
] | null | null | null |
example_problems/tutorial/triangle/services/eval_number_of_triangles_in_triangle_server.py
|
romeorizzi/TALight
|
2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e
|
[
"MIT"
] | 5
|
2021-04-01T15:21:57.000Z
|
2022-01-29T15:07:38.000Z
|
#!/usr/bin/env python3
from sys import stderr, exit
import random
import math
from time import monotonic
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
import triangle_lib as tl
# METADATA OF THIS TAL_SERVICE:
args_list = [
('goal',str),
('code_lang',str),
]
ENV = Env(args_list)
TAc = TALcolors(ENV)
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
# INSTANCES FOR GOAL = correct
instances = { 'correct' : [] }
MIN_VAL = 0
MAX_VAL = 1
MIN_SMALL_N = 2
MAX_SMALL_N = 2
MIN_BIG_N = 5
MAX_BIG_N = 10
NUM_INSTANCES = 5
for i in range (NUM_INSTANCES):
small_seed = random.randint(100000,999999)
big_seed = random.randint(100000,999999)
couple = [tl.random_triangle(MIN_SMALL_N, MIN_VAL, MAX_VAL, small_seed, TAc, LANG),tl.random_triangle(MIN_BIG_N+i, MIN_VAL, MAX_VAL, big_seed, TAc, LANG)]
instances['correct'].append([couple, [MIN_SMALL_N,MIN_BIG_N+i], MIN_VAL, MAX_VAL, [small_seed,big_seed]])
# INSTANCES FOR GOAL = 2^n or n^2
# SMALL INSTANCES
if ENV["goal"] == 'time_at_most_2_exp_n' or ENV["goal"] =='time_at_most_n_exp_2':
instances['time_at_most_2_exp_n'] = []
NUM_INSTANCES = 6
MIN_SMALL_N = 2
MAX_SMALL_N = 4
MIN_BIG_N = 7
MAX_BIG_N = 12
scaling_factor = 1.1
if ENV["code_lang"] == "compiled":
MAX_BIG_N = 18
scaling_factor = 1.2
for n in range(NUM_INSTANCES):
small_seed = random.randint(100000,999999)
big_seed = random.randint(100000,999999)
couple = [tl.random_triangle(MIN_SMALL_N, MIN_VAL, MAX_VAL, small_seed, TAc, LANG),tl.random_triangle(MIN_BIG_N, MIN_VAL, MAX_VAL, big_seed, TAc, LANG)]
instances['time_at_most_2_exp_n'].append([couple, [MIN_SMALL_N,MIN_BIG_N], MIN_VAL, MAX_VAL, [small_seed,big_seed]])
MIN_SMALL_N = math.ceil(scaling_factor*MIN_SMALL_N)
if MIN_SMALL_N > MAX_SMALL_N:
MIN_SMALL_N = MAX_SMALL_N
MIN_BIG_N = math.ceil(scaling_factor*MIN_BIG_N)
if MIN_BIG_N > MAX_BIG_N:
MIN_BIG_N = MAX_BIG_N
# INSTANCES FOR GOAL = n^2
# SMALL INSTANCES
if ENV["goal"] == 'time_at_most_n_exp_2':
instances['time_at_most_n_exp_2'] = []
NUM_INSTANCES = 15
MIN_SMALL_N = 2
MAX_SMALL_N = 4
MIN_BIG_N = 20
MAX_BIG_N = 50
scaling_factor = 1.1
if ENV["code_lang"] == "compiled":
MIN_BIG_N = 30
MAX_BIG_N = 100
for _ in range(NUM_INSTANCES):
small_seed = random.randint(100000,999999)
big_seed = random.randint(100000,999999)
couple = [tl.random_triangle(MIN_SMALL_N, MIN_VAL, MAX_VAL, small_seed, TAc, LANG),tl.random_triangle(MIN_BIG_N, MIN_VAL, MAX_VAL, big_seed, TAc, LANG)]
instances['time_at_most_n_exp_2'].append([couple, [MIN_SMALL_N,MIN_BIG_N], MIN_VAL, MAX_VAL, [small_seed,big_seed]])
MIN_SMALL_N = math.ceil(scaling_factor*MIN_SMALL_N)
if MIN_SMALL_N > MAX_SMALL_N:
MIN_SMALL_N = MAX_SMALL_N
MIN_BIG_N = math.ceil(scaling_factor*MIN_BIG_N)
if MIN_BIG_N > MAX_BIG_N:
MIN_BIG_N = MAX_BIG_N
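# Per-instance wall-clock limit, in seconds, for the submitted solution.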
MAX_TIME = 2
#CHECK TIME ELAPSED FOR correct
if ENV["goal"] == 'correct':
visited_instances_correct = []
for instance in instances['correct']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_correct.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_correct.append([instance,time,"wrong"])
else:
visited_instances_correct.append([instance,time,"right"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
exit(0)
#CHECK TIME ELAPSED FOR time_at_most_2_exp_n
elif ENV["goal"] == 'time_at_most_2_exp_n':
visited_instances_correct = []
for instance in instances['correct']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_correct.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_correct.append([instance,time,"wrong"])
else:
visited_instances_correct.append([instance,time,"right"])
visited_instances_2_exp_n = []
for instance in instances['time_at_most_2_exp_n']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_2_exp_n.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
tl.print_goal_summary('time_at_most_2_exp_n',visited_instances_2_exp_n,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_2_exp_n.append([instance,time,"wrong"])
else:
visited_instances_2_exp_n.append([instance,time,"right"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
tl.print_goal_summary('time_at_most_2_exp_n',visited_instances_2_exp_n,TAc,LANG)
exit(0)
#CHECK TIME ELAPSED FOR time_at_most_n_exp_2
else:
visited_instances_correct = []
for instance in instances['correct']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_correct.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_correct.append([instance,time,"wrong"])
else:
visited_instances_correct.append([instance,time,"right"])
visited_instances_2_exp_n = []
for instance in instances['time_at_most_2_exp_n']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_2_exp_n.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
tl.print_goal_summary('time_at_most_2_exp_n',visited_instances_2_exp_n,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_2_exp_n.append([instance,time,"wrong"])
else:
visited_instances_2_exp_n.append([instance,time,"right"])
visited_instances_n_exp_2 = []
for instance in instances['time_at_most_n_exp_2']:
small_triangle = instance[0][0]
big_triangle = instance[0][1]
small = tl.cast_to_array(small_triangle)
big = tl.cast_to_array(big_triangle)
l = len(small_triangle)
L = len(big_triangle)
TAc.print(LANG.render_feedback("small-triangle-size", f'The small triangle has this number of rows:'), "white")
print(l)
TAc.print(LANG.render_feedback("big-triangle-size", f'The big triangle has this number of rows:'), "white")
print(L)
TAc.print(LANG.render_feedback("rough-small-triangle", f'The small triangle can be seen as a list of lists. In this case we have:'), "white")
print(small)
TAc.print(LANG.render_feedback("rough-big-triangle", f'The big triangle can be seen as a list of lists. In this case we have:'), "white")
print(big)
TAc.print(LANG.render_feedback("fit-question", f'How many times does the small triangle fit in the big triangle?'), "white")
start = monotonic()
answer = int(TALinput(str, line_recognizer=lambda path,TAc,LANG:True, TAc=TAc, LANG=LANG)[0])
end = monotonic()
time = end-start
right_answer = 0
livello = 1
indexes = []
for i in range(int(((L-l+1)*(L-l+2))/2)):
if i >= livello*(livello+1)/2:
livello +=1
if big[i] == small[0]:
if tl.fits(i,livello,big,small,l)[0]:
indexes.append(tl.fits(i,livello,big,small,l)[1])
right_answer += 1
if time > MAX_TIME:
visited_instances_n_exp_2.append([instance,time,"out_of_time"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
tl.print_goal_summary('time_at_most_2_exp_n',visited_instances_2_exp_n,TAc,LANG)
tl.print_goal_summary('time_at_most_n_exp_2',visited_instances_n_exp_2,TAc,LANG)
exit(0)
elif answer != right_answer:
visited_instances_n_exp_2.append([instance,time,"wrong"])
else:
visited_instances_n_exp_2.append([instance,time,"right"])
tl.print_goal_summary('correct',visited_instances_correct,TAc,LANG)
tl.print_goal_summary('time_at_most_2_exp_n',visited_instances_2_exp_n,TAc,LANG)
tl.print_goal_summary('time_at_most_n_exp_2',visited_instances_n_exp_2,TAc,LANG)
exit(0)
| 45.365651
| 160
| 0.633327
| 2,422
| 16,377
| 4.055326
| 0.054913
| 0.063531
| 0.036652
| 0.054979
| 0.938913
| 0.938404
| 0.935756
| 0.927102
| 0.91346
| 0.902566
| 0
| 0.021203
| 0.245466
| 16,377
| 360
| 161
| 45.491667
| 0.773651
| 0.019417
| 0
| 0.844721
| 0
| 0
| 0.191313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021739
| 0
| 0.021739
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1502604bc24e5260abb118926913c541cf2e3330
| 145
|
py
|
Python
|
build/lib/abdal_net_py/__init__.py
|
abdal-security-group/abdal-net-py
|
209035681836cb80553b10d6f885fab650a76aa3
|
[
"MIT"
] | null | null | null |
build/lib/abdal_net_py/__init__.py
|
abdal-security-group/abdal-net-py
|
209035681836cb80553b10d6f885fab650a76aa3
|
[
"MIT"
] | null | null | null |
build/lib/abdal_net_py/__init__.py
|
abdal-security-group/abdal-net-py
|
209035681836cb80553b10d6f885fab650a76aa3
|
[
"MIT"
] | null | null | null |
from .abdal_net_py_about import *
from .abdal_net_py_generator import *
from .abdal_net_py_loger import *
from .abdal_net_py_unit import *
| 24.166667
| 38
| 0.793103
| 24
| 145
| 4.291667
| 0.375
| 0.349515
| 0.466019
| 0.543689
| 0.582524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151724
| 145
| 5
| 39
| 29
| 0.837398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
1516466b6a5b071db586387c137b310215c6ec09
| 25,586
|
py
|
Python
|
VMC Pygame/VMC Pygame - Homework/VMC Pygame - Certification Game/VMC Pygame - Certification Game.py
|
arshsaxena/VMC-CBJr
|
15f95ce91c85e28639d66bea7a54502a9bc78b78
|
[
"MIT"
] | 3
|
2020-10-06T06:00:29.000Z
|
2020-10-06T17:49:32.000Z
|
Sudoku with Solver/Sudoku with Solver.py
|
arshsaxena/PythonGamesAndProjects
|
52a2a75b8a0d81646052164f5143428ce0c62cb0
|
[
"MIT"
] | null | null | null |
Sudoku with Solver/Sudoku with Solver.py
|
arshsaxena/PythonGamesAndProjects
|
52a2a75b8a0d81646052164f5143428ce0c62cb0
|
[
"MIT"
] | 1
|
2020-10-01T05:47:32.000Z
|
2020-10-01T05:47:32.000Z
|
import pygame
import requests
pygame.init()
width = 500
height = 635
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Sudoku by Arsh Saxena")
icon = pygame.image.load('resources/icon.png')
pygame.display.set_icon(icon)
background = pygame.image.load('resources/menu.png')
font_bold_18 = pygame.font.Font('resources/sf-bold.otf', 18)
font_18 = pygame.font.Font('resources/sf-reg.otf', 18)
font_30 = pygame.font.Font('resources/sf-reg.otf', 30)
font_bold_30 = pygame.font.Font('resources/sf-bold.otf', 30)
font_40 = pygame.font.Font('resources/sf-mono.otf', 40)
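# Global UI state shared by all difficulty modes.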
level = 1
x = 0
y = 0
dif = 500 / 9
value = 0
def easy():
global x, y, dif, value
response = requests.get("https://sugoku.herokuapp.com/board?difficulty=easy")
grid = response.json()['board']
"""
If you don't want random values, just remove or comment out lines 30, 31, and 238, and uncomment lines 35-45 and 239-249.
"""
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
def get_cord(pos):
global x, y
x = pos[0] // dif
y = pos[1] // dif
def draw_box():
for i in range(2):
pygame.draw.line(screen, (255, 153, 0), (x * dif - 3, (y + i) * dif), (x * dif + dif + 3, (y + i) * dif), 7)
pygame.draw.line(screen, (255, 153, 0), ((x + i) * dif, y * dif), ((x + i) * dif, y * dif + dif), 7)
def draw():
for i in range(9):
for j in range(9):
if grid[i][j] != 0:
pygame.draw.rect(screen, (0, 200, 0), (i * dif, j * dif, dif + 1, dif + 1))
text1 = font_40.render(str(grid[i][j]), 1, (255, 255, 255))
screen.blit(text1, (i * dif + 15, j * dif + 4))
for i in range(10):
if i % 3 == 0:
thick = 7
else:
thick = 1
pygame.draw.line(screen, (0, 0, 0), (0, i * dif), (500, i * dif), thick)
pygame.draw.line(screen, (0, 0, 0), (i * dif, 0), (i * dif, 500), thick)
def draw_value(value):
text1 = font_40.render(str(value), 1, (0, 0, 0))
screen.blit(text1, (x * dif + 15, y * dif + 15))
def raise_error1():
text1 = font_40.render("Wrong!", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
def raise_error2():
text1 = font_40.render("Wrong, not a valid key.", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
def valid(m, i, j, value):
for it in range(9):
if m[i][it] == value:
return False
if m[it][j] == value:
return False
it = i // 3
jt = j // 3
for i in range(it * 3, it * 3 + 3):
for j in range(jt * 3, jt * 3 + 3):
if m[i][j] == value:
return False
return True
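    # Backtracking solver; redraws the board after every trial placement to animate the search.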
def solve(grid, i, j):
while grid[i][j] != 0:
if i < 8:
i += 1
elif i == 8 and j < 8:
i = 0
j += 1
elif i == 8 and j == 8:
return True
pygame.event.pump()
for it in range(1, 10):
            if valid(grid, i, j, it):
grid[i][j] = it
global x, y
x = i
y = j
screen.fill((255, 255, 255))
draw()
draw_box()
pygame.display.update()
pygame.time.delay(20)
                if solve(grid, i, j):
return True
else:
grid[i][j] = 0
screen.fill((255, 255, 255))
draw()
draw_box()
pygame.display.update()
pygame.time.delay(50)
return False
def instruction():
dif_msg = font_18.render("Difficulty: EASY", 1, (0, 0, 0))
text1 = font_18.render("Press D to reset values to default. Press R to clear", 1, (0, 0, 0))
text2 = font_18.render("entered values. Press ENTER to visualize.", 1, (0, 0, 0))
screen.blit(dif_msg, (20, 520))
screen.blit(text1, (20, 540))
screen.blit(text2, (20, 560))
def result():
text1 = font_30.render("FINISHED! QUIT or press R or D.", 1, (0, 0, 0))
screen.blit(text1, (20, 590))
running = True
flag1 = 0
flag2 = 0
rs = 0
error = 0
while running:
screen.fill((255, 255, 255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN:
flag1 = 1
pos = pygame.mouse.get_pos()
get_cord(pos)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x -= 1
flag1 = 1
if event.key == pygame.K_RIGHT:
x += 1
flag1 = 1
if event.key == pygame.K_UP:
y -= 1
flag1 = 1
if event.key == pygame.K_DOWN:
y += 1
flag1 = 1
if event.key == pygame.K_ESCAPE:
menu()
if event.key == pygame.K_1:
value = 1
if event.key == pygame.K_2:
value = 2
if event.key == pygame.K_3:
value = 3
if event.key == pygame.K_4:
value = 4
if event.key == pygame.K_5:
value = 5
if event.key == pygame.K_6:
value = 6
if event.key == pygame.K_7:
value = 7
if event.key == pygame.K_8:
value = 8
if event.key == pygame.K_9:
value = 9
if event.key == pygame.K_RETURN:
flag2 = 1
if event.key == pygame.K_r:
rs = 0
error = 0
flag2 = 0
grid = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
if event.key == pygame.K_d:
rs = 0
error = 0
flag2 = 0
grid = response.json()['board']
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
if flag2 == 1:
            if not solve(grid, 0, 0):
error = 1
else:
rs = 1
flag2 = 0
if value != 0:
draw_value(value)
            if valid(grid, int(x), int(y), value):
grid[int(x)][int(y)] = value
flag1 = 0
else:
grid[int(x)][int(y)] = 0
raise_error2()
value = 0
if error == 1:
raise_error1()
if rs == 1:
result()
draw()
if flag1 == 1:
draw_box()
instruction()
pygame.display.update()
pygame.quit()
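# medium() repeats the easy() logic with the 'medium' board endpoint and label.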
def medium():
global x, y, dif, value
response = requests.get("https://sugoku.herokuapp.com/board?difficulty=medium")
grid = response.json()['board']
"""
If you don't want random values, just remove or comment out lines 288, 289, and 497, and uncomment lines 293-303 and 498-508.
"""
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
    def get_cord(pos):
        global x, y
        x = pos[0] // dif
        y = pos[1] // dif
def draw_box():
for i in range(2):
pygame.draw.line(screen, (255, 153, 0), (x * dif - 3, (y + i) * dif), (x * dif + dif + 3, (y + i) * dif), 7)
pygame.draw.line(screen, (255, 153, 0), ((x + i) * dif, y * dif), ((x + i) * dif, y * dif + dif), 7)
def draw():
for i in range(9):
for j in range(9):
if grid[i][j] != 0:
pygame.draw.rect(screen, (0, 200, 0), (i * dif, j * dif, dif + 1, dif + 1))
text1 = font_40.render(str(grid[i][j]), 1, (255, 255, 255))
screen.blit(text1, (i * dif + 15, j * dif + 4))
for i in range(10):
if i % 3 == 0:
thick = 7
else:
thick = 1
pygame.draw.line(screen, (0, 0, 0), (0, i * dif), (500, i * dif), thick)
pygame.draw.line(screen, (0, 0, 0), (i * dif, 0), (i * dif, 500), thick)
def draw_value(value):
text1 = font_40.render(str(value), 1, (0, 0, 0))
screen.blit(text1, (x * dif + 15, y * dif + 15))
def raise_error1():
text1 = font_40.render("Wrong!", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
def raise_error2():
text1 = font_40.render("Wrong, not a valid key.", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
def valid(m, i, j, value):
for it in range(9):
if m[i][it] == value:
return False
if m[it][j] == value:
return False
it = i // 3
jt = j // 3
for i in range(it * 3, it * 3 + 3):
for j in range(jt * 3, jt * 3 + 3):
if m[i][j] == value:
return False
return True
def solve(grid, i, j):
while grid[i][j] != 0:
if i < 8:
i += 1
elif i == 8 and j < 8:
i = 0
j += 1
elif i == 8 and j == 8:
return True
pygame.event.pump()
for it in range(1, 10):
            if valid(grid, i, j, it):
grid[i][j] = it
global x, y
x = i
y = j
screen.fill((255, 255, 255))
draw()
draw_box()
pygame.display.update()
pygame.time.delay(20)
                if solve(grid, i, j):
return True
else:
grid[i][j] = 0
screen.fill((255, 255, 255))
draw()
draw_box()
pygame.display.update()
pygame.time.delay(50)
return False
def instruction():
dif_msg = font_18.render("Difficulty: MEDIUM", 1, (0, 0, 0))
text1 = font_18.render("Press D to reset values to default. Press R to clear", 1, (0, 0, 0))
text2 = font_18.render("entered values. Press ENTER to visualize.", 1, (0, 0, 0))
screen.blit(dif_msg, (20, 520))
screen.blit(text1, (20, 540))
screen.blit(text2, (20, 560))
def result():
text1 = font_30.render("FINISHED! QUIT or press R or D.", 1, (0, 0, 0))
screen.blit(text1, (20, 590))
running = True
flag1 = 0
flag2 = 0
rs = 0
error = 0
while running:
screen.fill((255, 255, 255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN:
flag1 = 1
pos = pygame.mouse.get_pos()
get_cord(pos)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x -= 1
flag1 = 1
if event.key == pygame.K_RIGHT:
x += 1
flag1 = 1
if event.key == pygame.K_UP:
y -= 1
flag1 = 1
if event.key == pygame.K_DOWN:
y += 1
flag1 = 1
if event.key == pygame.K_ESCAPE:
menu()
if event.key == pygame.K_1:
value = 1
if event.key == pygame.K_2:
value = 2
if event.key == pygame.K_3:
value = 3
if event.key == pygame.K_4:
value = 4
if event.key == pygame.K_5:
value = 5
if event.key == pygame.K_6:
value = 6
if event.key == pygame.K_7:
value = 7
if event.key == pygame.K_8:
value = 8
if event.key == pygame.K_9:
value = 9
if event.key == pygame.K_RETURN:
flag2 = 1
if event.key == pygame.K_r:
rs = 0
error = 0
flag2 = 0
grid = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
if event.key == pygame.K_d:
rs = 0
error = 0
flag2 = 0
grid = response.json()['board']
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
if flag2 == 1:
            if not solve(grid, 0, 0):
error = 1
else:
rs = 1
flag2 = 0
if value != 0:
draw_value(value)
            if valid(grid, int(x), int(y), value):
grid[int(x)][int(y)] = value
flag1 = 0
else:
grid[int(x)][int(y)] = 0
raise_error2()
value = 0
if error == 1:
raise_error1()
if rs == 1:
result()
draw()
if flag1 == 1:
draw_box()
instruction()
pygame.display.update()
pygame.quit()
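# hard() repeats the same logic with the 'hard' board endpoint and label.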
def hard():
global x, y, dif, value
response = requests.get("https://sugoku.herokuapp.com/board?difficulty=hard")
grid = response.json()['board']
"""
If you don't want random values, just remove or comment out lines 547, 548, and 756, and uncomment lines 552-562 and 757-767.
"""
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
    def get_cord(pos):
        global x, y
        x = pos[0] // dif
        y = pos[1] // dif
def draw_box():
for i in range(2):
pygame.draw.line(screen, (255, 153, 0), (x * dif - 3, (y + i) * dif), (x * dif + dif + 3, (y + i) * dif), 7)
pygame.draw.line(screen, (255, 153, 0), ((x + i) * dif, y * dif), ((x + i) * dif, y * dif + dif), 7)
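    # Paint the filled cells and the 9x9 grid lines; every third line is
    # drawn thicker to mark the 3x3 boxes.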
def draw():
for i in range(9):
for j in range(9):
if grid[i][j] != 0:
pygame.draw.rect(screen, (0, 200, 0), (i * dif, j * dif, dif + 1, dif + 1))
text1 = font_40.render(str(grid[i][j]), 1, (255, 255, 255))
screen.blit(text1, (i * dif + 15, j * dif + 4))
for i in range(10):
if i % 3 == 0:
thick = 7
else:
thick = 1
pygame.draw.line(screen, (0, 0, 0), (0, i * dif), (500, i * dif), thick)
pygame.draw.line(screen, (0, 0, 0), (i * dif, 0), (i * dif, 500), thick)
def draw_value(value):
text1 = font_40.render(str(value), 1, (0, 0, 0))
screen.blit(text1, (x * dif + 15, y * dif + 15))
def raise_error1():
text1 = font_40.render("Wrong!", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
def raise_error2():
text1 = font_40.render("Wrong, not a valid key.", 1, (0, 0, 0))
screen.blit(text1, (20, 570))
    def valid(m, i, j, value):
        # Reject value if it already appears in row i or column j.
        for it in range(9):
            if m[i][it] == value:
                return False
            if m[it][j] == value:
                return False
        # Reject value if it already appears in the 3x3 box containing (i, j).
        it = i // 3
        jt = j // 3
        for bi in range(it * 3, it * 3 + 3):
            for bj in range(jt * 3, jt * 3 + 3):
                if m[bi][bj] == value:
                    return False
        return True
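    # Animated backtracking solver: place each candidate digit, redraw, recurse,
    # and undo the placement (with a short delay) when a branch fails.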
def solve(grid, i, j):
while grid[i][j] != 0:
if i < 8:
i += 1
elif i == 8 and j < 8:
i = 0
j += 1
elif i == 8 and j == 8:
return True
pygame.event.pump()
        for it in range(1, 10):
            if valid(grid, i, j, it):
                grid[i][j] = it
                global x, y
                x = i
                y = j
                # Show the tentative placement before recursing.
                screen.fill((255, 255, 255))
                draw()
                draw_box()
                pygame.display.update()
                pygame.time.delay(20)
                if solve(grid, i, j):
                    return True
                # Backtrack: this placement led to a dead end, so erase it.
                grid[i][j] = 0
                screen.fill((255, 255, 255))
                draw()
                draw_box()
                pygame.display.update()
                pygame.time.delay(50)
        return False
def instruction():
dif_msg = font_18.render("Difficulty: HARD", 1, (0, 0, 0))
text1 = font_18.render("Press D to reset values to default. Press R to clear", 1, (0, 0, 0))
text2 = font_18.render("entered values. Press ENTER to visualize.", 1, (0, 0, 0))
screen.blit(dif_msg, (20, 520))
screen.blit(text1, (20, 540))
screen.blit(text2, (20, 560))
def result():
text1 = font_30.render("FINISHED! QUIT or press R or D.", 1, (0, 0, 0))
screen.blit(text1, (20, 590))
running = True
flag1 = 0
flag2 = 0
rs = 0
error = 0
while running:
screen.fill((255, 255, 255))
for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Leave the event loop; pygame.quit() runs once after it exits.
                running = False
if event.type == pygame.MOUSEBUTTONDOWN:
flag1 = 1
pos = pygame.mouse.get_pos()
get_cord(pos)
if event.type == pygame.KEYDOWN:
                # Clamp the cursor to the 9x9 board so arrow keys cannot index off the grid.
                if event.key == pygame.K_LEFT:
                    x = max(x - 1, 0)
                    flag1 = 1
                if event.key == pygame.K_RIGHT:
                    x = min(x + 1, 8)
                    flag1 = 1
                if event.key == pygame.K_UP:
                    y = max(y - 1, 0)
                    flag1 = 1
                if event.key == pygame.K_DOWN:
                    y = min(y + 1, 8)
                    flag1 = 1
if event.key == pygame.K_ESCAPE:
menu()
if event.key == pygame.K_1:
value = 1
if event.key == pygame.K_2:
value = 2
if event.key == pygame.K_3:
value = 3
if event.key == pygame.K_4:
value = 4
if event.key == pygame.K_5:
value = 5
if event.key == pygame.K_6:
value = 6
if event.key == pygame.K_7:
value = 7
if event.key == pygame.K_8:
value = 8
if event.key == pygame.K_9:
value = 9
if event.key == pygame.K_RETURN:
flag2 = 1
if event.key == pygame.K_r:
rs = 0
error = 0
flag2 = 0
grid = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]
if event.key == pygame.K_d:
rs = 0
error = 0
flag2 = 0
grid = response.json()['board']
# grid = [
# [7, 8, 0, 4, 0, 0, 1, 2, 0],
# [6, 0, 0, 0, 7, 5, 0, 0, 9],
# [0, 0, 0, 6, 0, 1, 0, 7, 8],
# [0, 0, 7, 0, 4, 0, 2, 6, 0],
# [0, 0, 1, 0, 5, 0, 9, 3, 0],
# [9, 0, 4, 0, 6, 0, 0, 0, 5],
# [0, 7, 0, 3, 0, 0, 0, 1, 2],
# [1, 2, 0, 0, 0, 7, 4, 0, 0],
# [0, 4, 9, 2, 0, 6, 0, 0, 7]
# ]
if flag2 == 1:
            if not solve(grid, 0, 0):
error = 1
else:
rs = 1
flag2 = 0
if value != 0:
draw_value(value)
            if valid(grid, int(x), int(y), value):
grid[int(x)][int(y)] = value
flag1 = 0
else:
grid[int(x)][int(y)] = 0
raise_error2()
value = 0
if error == 1:
raise_error1()
if rs == 1:
result()
draw()
if flag1 == 1:
draw_box()
instruction()
pygame.display.update()
pygame.quit()
def menu():
global level
MainRun = True
while MainRun:
screen.fill((18, 18, 18))
screen.blit(background, (0, 0))
welcome_line = font_bold_30.render("WELCOME", 1, (255, 255, 255))
line1 = font_bold_18.render("• For EASY difficulty sudoku, please press 1.", 1, (0, 255, 0))
line2 = font_bold_18.render("• For MEDIUM difficulty sudoku, please press 2.", 1, (242, 255, 0))
line3 = font_bold_18.render("• For HARD difficulty sudoku, please press 3.", 1, (255, 0, 0))
screen.blit(welcome_line, (170, 510))
screen.blit(line1, (20, 550))
screen.blit(line2, (20, 575))
screen.blit(line3, (20, 600))
for event in pygame.event.get():
if event.type == pygame.QUIT:
MainRun = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
level = 1
MainRun = False
print("Difficulty: Easy")
easy()
if event.key == pygame.K_2:
level = 2
MainRun = False
print("Difficulty: Medium")
medium()
if event.key == pygame.K_3:
level = 3
MainRun = False
print("Difficulty: Hard")
hard()
pygame.display.update()
menu()
pygame.display.update()
--- next file: tests/browser/elements/date_field_tests.py (repo: harsh183/nerodia, license: MIT, lang: Python) ---
import datetime
from re import compile
import pytest
from dateutil import parser
from nerodia.exception import UnknownObjectException
pytestmark = pytest.mark.page('forms_with_input_elements.html')
today = datetime.date.today()
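# Existence tests below cover the locator strategies for date_field:
# id, name, text, index, xpath, and label, as strings or compiled regexes.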
class TestDateFieldExist(object):
def test_returns_true_if_the_date_field_exists(self, browser):
assert browser.date_field(id='html5_date').exists is True
assert browser.date_field(id=compile(r'html5_date')).exists is True
assert browser.date_field(name='html5_date').exists is True
assert browser.date_field(name=compile(r'html5_date')).exists is True
assert browser.date_field(text='').exists is True
assert browser.date_field(index=0).exists is True
assert browser.date_field(xpath="//input[@id='html5_date']").exists is True
assert browser.date_field(label='HTML5 Date').exists is True
assert browser.date_field(label=compile(r'Date$')).exists is True
def test_returns_the_first_date_field_if_given_no_args(self, browser):
assert browser.date_field().exists
def test_respects_date_field_types(self, browser):
assert browser.date_field().type == 'date'
def test_returns_false_if_the_date_field_doesnt_exist(self, browser):
assert browser.date_field(id='no_such_id').exists is False
assert browser.date_field(id=compile(r'no_such_id')).exists is False
assert browser.date_field(name='no_such_name').exists is False
assert browser.date_field(name=compile(r'no_such_name')).exists is False
assert browser.date_field(text='no_such_text').exists is False
assert browser.date_field(text=compile(r'no_such_text')).exists is False
assert browser.date_field(class_name='no_such_class').exists is False
assert browser.date_field(class_name=compile(r'no_such_class')).exists is False
assert browser.date_field(index=1337).exists is False
assert browser.date_field(xpath="//input[@id='no_such_id']").exists is False
assert browser.date_field(label='bad label').exists is False
assert browser.date_field(label=compile(r'bad label')).exists is False
def test_raises_correct_exception_when_what_argument_is_invalid(self, browser):
with pytest.raises(TypeError):
browser.date_field(id=3.14).exists
class TestDateFieldAttributes(object):
# id
def test_returns_the_id_attribute_if_element_exists(self, browser):
assert browser.date_field(name='html5_date').id == 'html5_date'
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_id_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(index=1337).id
# name
def test_returns_the_name_attribute_if_element_exists(self, browser):
assert browser.date_field(id='html5_date').name == 'html5_date'
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_name_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(index=1337).name
# type
def test_returns_the_type_attribute_if_element_exists(self, browser):
assert browser.date_field(id='html5_date').type == 'date'
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_type_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(index=1337).type
# value
def test_returns_the_value_attribute_if_element_exists(self, browser):
assert browser.date_field(id='html5_date').value == ''
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_value_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(index=1337).value
def test_finds_all_attribute_methods(browser):
assert hasattr(browser.date_field(), 'class_name')
assert hasattr(browser.date_field(), 'id')
assert hasattr(browser.date_field(), 'name')
assert hasattr(browser.date_field(), 'title')
assert hasattr(browser.date_field(), 'type')
assert hasattr(browser.date_field(), 'value')
class TestDateFieldAccessMethods(object):
# enabled
def test_returns_true_for_enabled_date_fields(self, browser):
assert browser.date_field(id='html5_date').enabled is True
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_enabled_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(index=1337).enabled
class TestDateFieldManipulationValue(object):
# value =
def test_sets_the_value_of_the_element(self, browser):
date = browser.date_field(id='html5_date')
date.value = today
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_datetime(self, browser):
date = browser.date_field(id='html5_date')
value = datetime.datetime.now()
date.value = value
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_string(self, browser):
date = browser.date_field(id='html5_date')
value = today.strftime('%Y-%m-%d')
date.value = value
assert parser.parse(date.value).date() == today
def test_sets_the_value_when_accessed_through_the_enclosing_form(self, browser):
date_field = browser.form(id='new_user').date_field(id='html5_date')
date_field.value = today
assert parser.parse(date_field.value).date() == today
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_value_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(id='no_such_id').value = today
def test_raises_correct_exception_for_value_if_using_non_date_parameter(self, browser):
with pytest.raises(TypeError):
browser.date_field(id='no_such_id').value = 5
class TestDateFieldManipulationJsSet(object):
# js_set
def test_sets_the_value_of_the_element(self, browser):
date = browser.date_field(id='html5_date')
date.js_set(today)
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_datetime(self, browser):
date = browser.date_field(id='html5_date')
value = datetime.datetime.now()
date.js_set(value)
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_string(self, browser):
date = browser.date_field(id='html5_date')
value = today.strftime('%Y-%m-%d')
date.js_set(value)
assert parser.parse(date.value).date() == today
def test_sets_the_value_when_accessed_through_the_enclosing_form(self, browser):
date_field = browser.form(id='new_user').date_field(id='html5_date')
date_field.js_set(today)
assert parser.parse(date_field.value).date() == today
def test_raises_correct_exception_for_js_set_when_no_args_are_provided(self, browser):
with pytest.raises(TypeError):
browser.date_field(id='html5_date').js_set()
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_js_set_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(id='no_such_id').js_set(today)
class TestDateFieldManipulationSet(object):
# set
def test_sets_the_value_of_the_element(self, browser):
date = browser.date_field(id='html5_date')
date.set(today)
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_datetime(self, browser):
date = browser.date_field(id='html5_date')
value = datetime.datetime.now()
date.set(value)
assert parser.parse(date.value).date() == today
def test_sets_the_value_of_the_element_when_given_string(self, browser):
date = browser.date_field(id='html5_date')
value = today.strftime('%Y-%m-%d')
date.set(value)
assert parser.parse(date.value).date() == today
def test_sets_the_value_when_accessed_through_the_enclosing_form(self, browser):
date_field = browser.form(id='new_user').date_field(id='html5_date')
date_field.set(today)
assert parser.parse(date_field.value).date() == today
def test_raises_correct_exception_for_set_when_no_args_are_provided(self, browser):
with pytest.raises(TypeError):
browser.date_field(id='html5_date').set()
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_set_if_element_does_not_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.date_field(id='no_such_id').set(today)
--- next file: uai_sushi/sort_data.py (repo: evanthebouncy/nnhmm, license: MIT, lang: Python) ---
sort_train = [[5, 0, 3, 4, 6, 9, 8, 1, 7, 2], [0, 9, 6, 3, 7, 2, 8, 1, 5, 4], [7, 0, 2, 3, 8, 4, 5, 1, 9, 6], [4, 5, 7, 0, 2, 3, 1, 6, 8, 9], [8, 6, 5, 0, 3, 9, 2, 7, 4, 1], [6, 2, 9, 0, 5, 1, 4, 8, 7, 3], [4, 7, 0, 1, 2, 8, 3, 5, 9, 6], [6, 3, 7, 8, 0, 9, 2, 1, 4, 5], [2, 3, 7, 8, 0, 4, 5, 6, 1, 9], [0, 3, 1, 8, 6, 2, 9, 7, 4, 5], [7, 8, 3, 2, 6, 5, 0, 4, 9, 1], [4, 7, 1, 3, 2, 5, 6, 8, 0, 9], [7, 4, 5, 1, 0, 3, 8, 2, 9, 6], [3, 1, 0, 8, 7, 2, 9, 6, 5, 4], [2, 7, 1, 0, 4, 5, 9, 8, 6, 3], [7, 4, 3, 5, 0, 1, 2, 6, 8, 9], [3, 4, 0, 5, 9, 6, 7, 8, 2, 1], [7, 4, 1, 2, 0, 3, 6, 8, 5, 9], [7, 5, 0, 1, 2, 4, 8, 3, 9, 6], [5, 7, 4, 0, 3, 8, 1, 2, 9, 6], [4, 7, 0, 5, 2, 1, 8, 3, 6, 9], [7, 3, 5, 4, 0, 1, 6, 8, 9, 2], [1, 0, 7, 6, 2, 5, 8, 9, 3, 4], [5, 7, 4, 0, 2, 3, 8, 1, 6, 9], [2, 0, 6, 8, 7, 9, 1, 5, 4, 3], [7, 0, 5, 4, 1, 2, 8, 3, 6, 9], [5, 1, 0, 3, 6, 8, 7, 2, 4, 9], [7, 2, 5, 0, 4, 8, 3, 9, 1, 6], [5, 7, 3, 1, 2, 8, 9, 6, 0, 4], [7, 4, 3, 2, 0, 1, 5, 8, 6, 9], [3, 7, 2, 5, 6, 8, 0, 1, 9, 4], [7, 1, 2, 0, 8, 5, 6, 4, 9, 3], [7, 0, 4, 1, 5, 2, 6, 8, 3, 9], [2, 5, 6, 4, 0, 9, 8, 7, 1, 3], [7, 2, 1, 8, 0, 4, 3, 6, 9, 5], [5, 7, 1, 4, 2, 0, 8, 3, 9, 6], [0, 7, 3, 2, 1, 4, 5, 8, 6, 9], [7, 2, 6, 5, 1, 0, 8, 3, 9, 4], [4, 1, 2, 7, 5, 8, 3, 0, 6, 9], [1, 2, 8, 7, 6, 0, 3, 9, 5, 4], [5, 0, 6, 4, 3, 9, 1, 8, 7, 2], [7, 0, 1, 2, 8, 3, 6, 5, 9, 4], [7, 4, 2, 0, 3, 8, 5, 9, 1, 6], [8, 7, 5, 4, 2, 1, 6, 0, 3, 9], [8, 0, 3, 4, 5, 1, 2, 7, 9, 6], [7, 2, 4, 0, 3, 8, 6, 5, 1, 9], [7, 8, 1, 4, 5, 3, 0, 6, 9, 2], [0, 3, 7, 1, 8, 9, 6, 4, 2, 5], [1, 0, 3, 5, 2, 7, 8, 9, 6, 4], [2, 7, 5, 0, 9, 8, 6, 1, 3, 4], [7, 2, 5, 4, 3, 0, 8, 1, 6, 9], [6, 1, 0, 4, 3, 8, 7, 9, 2, 5], [4, 7, 5, 2, 3, 0, 1, 6, 8, 9], [2, 8, 3, 0, 7, 6, 5, 9, 1, 4], [4, 7, 2, 1, 3, 5, 0, 8, 9, 6], [7, 2, 1, 3, 8, 4, 5, 0, 9, 6], [4, 7, 1, 2, 8, 6, 9, 3, 5, 0], [5, 6, 0, 9, 7, 2, 1, 8, 3, 4], [1, 5, 0, 7, 6, 2, 4, 3, 8, 9], [1, 7, 5, 8, 2, 3, 0, 9, 6, 4], [4, 1, 5, 7, 3, 2, 0, 8, 6, 9], [7, 1, 0, 9, 6, 2, 8, 5, 3, 4], [7, 2, 8, 5, 6, 3, 0, 1, 4, 9], [7, 8, 3, 1, 0, 6, 2, 4, 9, 5], [1, 6, 0, 4, 3, 2, 9, 8, 7, 5], [2, 7, 0, 3, 5, 4, 1, 8, 9, 6], [7, 2, 4, 5, 1, 8, 0, 3, 6, 9], [1, 7, 5, 4, 3, 2, 6, 9, 0, 8], [1, 2, 6, 7, 3, 8, 9, 5, 0, 4], [1, 5, 7, 8, 6, 0, 4, 2, 3, 9], [7, 2, 1, 8, 5, 6, 4, 0, 3, 9], [7, 4, 6, 0, 3, 8, 2, 9, 1, 5], [2, 8, 0, 7, 9, 3, 6, 1, 4, 5], [1, 6, 2, 5, 7, 4, 0, 9, 8, 3], [4, 0, 3, 7, 2, 8, 5, 6, 9, 1], [0, 4, 8, 1, 7, 2, 3, 9, 5, 6], [7, 1, 0, 2, 8, 4, 3, 6, 9, 5], [0, 7, 2, 1, 6, 8, 4, 3, 5, 9], [7, 0, 3, 2, 8, 1, 9, 5, 4, 6], [7, 4, 5, 1, 0, 2, 8, 3, 6, 9], [0, 3, 9, 1, 6, 8, 7, 4, 5, 2], [9, 8, 5, 3, 6, 2, 0, 1, 4, 7], [7, 8, 9, 2, 5, 6, 3, 0, 1, 4], [3, 7, 2, 0, 8, 1, 6, 9, 4, 5], [5, 2, 8, 7, 3, 0, 9, 1, 6, 4], [5, 2, 7, 0, 1, 6, 3, 4, 8, 9], [1, 4, 5, 7, 3, 0, 2, 8, 6, 9], [8, 7, 1, 2, 4, 5, 6, 9, 0, 3], [7, 4, 5, 2, 8, 3, 0, 6, 9, 1], [4, 7, 2, 5, 8, 0, 3, 9, 1, 6], [7, 2, 0, 4, 5, 1, 9, 3, 6, 8], [2, 5, 8, 7, 1, 0, 9, 6, 3, 4], [5, 4, 7, 2, 3, 0, 1, 6, 8, 9], [7, 2, 8, 6, 5, 9, 3, 4, 0, 1], [6, 8, 1, 5, 0, 4, 9, 7, 2, 3], [5, 4, 7, 0, 2, 3, 1, 6, 8, 9], [4, 7, 2, 9, 1, 5, 8, 0, 6, 3], [7, 4, 9, 8, 5, 2, 0, 3, 1, 6], [1, 7, 5, 8, 0, 3, 2, 4, 9, 6], [7, 2, 1, 3, 0, 4, 9, 6, 8, 5], [7, 2, 1, 8, 3, 6, 9, 0, 4, 5], [2, 7, 4, 3, 0, 1, 8, 5, 9, 6], [8, 1, 5, 7, 4, 0, 2, 3, 6, 9], [9, 0, 3, 4, 5, 8, 6, 7, 1, 2], [2, 7, 1, 5, 4, 6, 8, 3, 9, 0], [4, 0, 3, 2, 8, 5, 1, 9, 6, 7], [2, 5, 4, 8, 0, 6, 3, 9, 1, 7], [5, 1, 4, 3, 0, 7, 6, 8, 9, 2], [7, 2, 5, 0, 1, 6, 8, 9, 4, 3], [1, 7, 5, 4, 2, 6, 3, 0, 8, 9], [7, 4, 1, 0, 2, 3, 
6, 5, 9, 8], [7, 2, 0, 6, 8, 9, 1, 4, 3, 5], [0, 4, 3, 7, 1, 8, 5, 2, 6, 9], [2, 7, 0, 8, 1, 6, 9, 4, 5, 3], [7, 2, 8, 0, 3, 9, 4, 1, 5, 6], [4, 7, 1, 0, 3, 5, 6, 2, 8, 9], [1, 0, 5, 7, 2, 3, 6, 9, 8, 4], [5, 0, 7, 4, 3, 2, 8, 9, 1, 6], [0, 3, 4, 7, 2, 5, 8, 9, 6, 1], [4, 7, 1, 8, 0, 5, 2, 6, 3, 9], [5, 4, 7, 0, 8, 1, 3, 9, 2, 6], [1, 6, 3, 0, 7, 2, 9, 5, 8, 4], [7, 5, 3, 4, 0, 2, 8, 6, 9, 1], [2, 5, 9, 7, 4, 6, 1, 8, 0, 3], [4, 1, 5, 0, 6, 7, 2, 3, 8, 9], [4, 7, 2, 3, 0, 8, 5, 6, 1, 9], [5, 4, 0, 3, 2, 8, 1, 6, 7, 9], [2, 1, 5, 8, 7, 6, 0, 4, 9, 3], [0, 6, 5, 2, 3, 8, 9, 4, 7, 1], [7, 3, 2, 5, 4, 8, 1, 0, 9, 6], [4, 5, 7, 0, 1, 3, 6, 9, 2, 8], [7, 2, 4, 8, 0, 1, 3, 5, 6, 9], [2, 7, 8, 6, 1, 0, 3, 5, 9, 4], [7, 4, 1, 2, 0, 5, 6, 8, 3, 9], [4, 7, 2, 8, 3, 1, 6, 9, 0, 5], [7, 2, 6, 8, 5, 9, 4, 3, 1, 0], [2, 8, 7, 1, 9, 0, 6, 5, 3, 4], [2, 1, 8, 5, 0, 3, 6, 7, 4, 9], [0, 3, 6, 1, 2, 7, 5, 4, 8, 9], [0, 6, 5, 4, 1, 2, 7, 3, 9, 8], [3, 0, 1, 5, 7, 2, 8, 4, 9, 6], [1, 9, 6, 0, 8, 3, 5, 7, 4, 2], [0, 9, 7, 8, 3, 6, 2, 4, 5, 1], [7, 2, 8, 4, 3, 5, 1, 6, 9, 0], [1, 7, 2, 3, 0, 8, 6, 9, 5, 4], [4, 7, 5, 0, 3, 2, 1, 6, 8, 9], [2, 3, 8, 7, 0, 4, 5, 1, 9, 6], [6, 1, 5, 7, 0, 2, 8, 3, 4, 9], [4, 2, 3, 8, 0, 7, 5, 1, 9, 6], [7, 5, 4, 0, 6, 2, 1, 3, 8, 9], [5, 4, 7, 1, 3, 2, 9, 0, 6, 8], [7, 5, 1, 4, 8, 2, 3, 6, 0, 9], [4, 7, 0, 5, 8, 3, 2, 9, 1, 6], [7, 2, 8, 1, 5, 6, 4, 3, 9, 0], [4, 7, 5, 1, 2, 3, 8, 0, 9, 6], [7, 2, 6, 8, 3, 5, 1, 9, 4, 0], [4, 5, 7, 2, 0, 1, 6, 8, 3, 9], [7, 3, 2, 0, 6, 4, 9, 8, 1, 5], [7, 8, 2, 6, 1, 4, 5, 0, 9, 3], [7, 4, 1, 2, 3, 6, 0, 5, 8, 9], [7, 4, 0, 5, 2, 3, 1, 8, 6, 9], [2, 6, 7, 5, 0, 8, 1, 4, 9, 3], [5, 7, 4, 1, 2, 0, 9, 8, 3, 6], [2, 1, 3, 0, 8, 6, 7, 4, 5, 9], [2, 8, 1, 3, 7, 0, 5, 6, 4, 9], [2, 8, 0, 1, 3, 5, 6, 9, 7, 4], [7, 1, 0, 2, 8, 6, 9, 3, 5, 4], [1, 7, 4, 5, 2, 0, 3, 8, 9, 6], [7, 4, 1, 6, 8, 2, 0, 3, 5, 9], [7, 5, 8, 2, 3, 6, 9, 4, 0, 1], [2, 5, 8, 7, 0, 1, 6, 4, 3, 9], [1, 7, 4, 5, 0, 2, 6, 8, 3, 9], [4, 5, 1, 7, 3, 8, 0, 6, 2, 9], [0, 7, 5, 4, 1, 3, 2, 8, 9, 6], [4, 7, 0, 5, 6, 2, 1, 3, 8, 9], [3, 6, 0, 7, 2, 8, 5, 4, 1, 9], [7, 2, 4, 5, 1, 8, 6, 0, 9, 3], [6, 1, 7, 0, 8, 2, 3, 9, 4, 5], [3, 8, 0, 7, 4, 2, 5, 1, 6, 9], [7, 2, 8, 5, 0, 3, 1, 6, 9, 4], [7, 1, 0, 5, 2, 3, 4, 8, 6, 9], [4, 3, 8, 5, 0, 2, 6, 9, 1, 7], [5, 4, 7, 2, 0, 3, 1, 8, 9, 6], [3, 1, 2, 0, 7, 6, 9, 8, 5, 4], [1, 5, 4, 7, 6, 0, 2, 8, 3, 9], [7, 0, 2, 1, 4, 5, 3, 8, 6, 9], [5, 3, 4, 2, 7, 8, 9, 1, 6, 0], [7, 0, 3, 4, 8, 9, 1, 6, 2, 5], [1, 7, 2, 5, 6, 8, 0, 4, 9, 3], [6, 9, 0, 1, 4, 5, 3, 2, 8, 7], [7, 2, 0, 4, 5, 8, 1, 6, 9, 3], [4, 7, 0, 5, 2, 8, 3, 6, 1, 9], [3, 0, 2, 1, 8, 7, 9, 4, 5, 6], [7, 0, 2, 6, 5, 8, 1, 3, 9, 4], [5, 4, 1, 0, 7, 8, 9, 2, 3, 6], [0, 7, 8, 3, 9, 1, 2, 5, 6, 4], [1, 5, 6, 0, 8, 2, 3, 7, 9, 4], [0, 5, 8, 2, 7, 3, 9, 6, 1, 4], [7, 1, 6, 4, 8, 5, 3, 0, 2, 9], [4, 7, 1, 2, 0, 9, 5, 8, 6, 3], [0, 1, 3, 8, 7, 9, 6, 2, 5, 4], [6, 3, 0, 8, 2, 9, 1, 7, 5, 4], [6, 8, 2, 7, 0, 3, 1, 9, 4, 5], [7, 4, 5, 1, 0, 8, 2, 3, 6, 9], [6, 5, 8, 7, 2, 1, 4, 3, 0, 9], [7, 5, 2, 1, 3, 6, 8, 0, 9, 4], [7, 2, 8, 4, 5, 0, 3, 6, 1, 9], [7, 0, 1, 3, 4, 5, 2, 6, 9, 8], [0, 1, 6, 5, 2, 4, 3, 7, 8, 9], [7, 8, 2, 9, 6, 1, 4, 5, 3, 0], [5, 3, 2, 7, 8, 1, 0, 9, 6, 4], [0, 4, 3, 7, 1, 2, 6, 5, 9, 8], [7, 2, 8, 0, 3, 5, 1, 4, 9, 6], [7, 5, 4, 1, 3, 2, 8, 9, 6, 0], [0, 7, 2, 3, 8, 6, 9, 4, 1, 5], [7, 8, 2, 1, 0, 3, 5, 6, 9, 4], [1, 7, 2, 0, 8, 3, 4, 5, 6, 9], [1, 7, 8, 2, 5, 4, 0, 3, 9, 6], [7, 2, 0, 8, 4, 9, 3, 1, 5, 6], [4, 7, 2, 3, 8, 9, 5, 6, 1, 0], [2, 8, 1, 7, 0, 4, 3, 6, 9, 5], [7, 8, 2, 6, 4, 5, 9, 
0, 1, 3], [7, 3, 0, 2, 9, 8, 4, 1, 5, 6], [7, 5, 4, 1, 6, 9, 8, 3, 0, 2], [4, 7, 0, 5, 3, 9, 8, 2, 6, 1], [7, 5, 2, 0, 3, 4, 8, 1, 9, 6], [7, 2, 5, 8, 1, 9, 0, 6, 4, 3], [3, 9, 6, 0, 1, 4, 5, 2, 7, 8], [7, 2, 5, 4, 8, 1, 0, 3, 9, 6], [0, 2, 1, 7, 9, 6, 8, 3, 5, 4], [4, 7, 2, 1, 8, 0, 3, 5, 6, 9], [2, 7, 5, 8, 0, 9, 4, 3, 1, 6], [7, 5, 1, 4, 2, 6, 3, 0, 8, 9], [8, 2, 3, 0, 7, 6, 1, 9, 4, 5], [7, 6, 2, 0, 1, 3, 8, 9, 5, 4], [1, 7, 8, 2, 3, 9, 6, 0, 5, 4], [7, 0, 6, 1, 5, 2, 9, 8, 4, 3], [0, 4, 7, 5, 2, 8, 1, 3, 6, 9], [7, 1, 4, 3, 2, 0, 8, 9, 6, 5], [2, 8, 7, 5, 0, 4, 1, 6, 3, 9], [8, 2, 7, 1, 3, 0, 9, 5, 6, 4], [1, 0, 2, 8, 9, 7, 3, 6, 5, 4], [7, 4, 2, 8, 0, 3, 1, 6, 5, 9], [1, 7, 2, 0, 8, 3, 5, 4, 6, 9], [3, 7, 4, 0, 1, 5, 2, 8, 9, 6], [7, 2, 5, 1, 3, 0, 8, 6, 9, 4], [2, 6, 8, 3, 0, 4, 7, 1, 9, 5], [3, 7, 4, 5, 1, 2, 8, 0, 9, 6], [5, 2, 3, 7, 0, 4, 8, 6, 9, 1], [7, 2, 1, 8, 0, 4, 5, 3, 9, 6], [8, 5, 0, 7, 2, 3, 4, 6, 1, 9], [7, 4, 3, 1, 0, 8, 2, 6, 9, 5], [7, 2, 0, 4, 5, 3, 1, 8, 6, 9], [7, 2, 8, 6, 0, 9, 3, 4, 1, 5], [7, 8, 2, 0, 9, 5, 6, 1, 3, 4], [4, 7, 2, 3, 5, 0, 8, 1, 6, 9], [3, 6, 8, 7, 2, 5, 0, 4, 9, 1], [5, 8, 7, 2, 4, 9, 1, 3, 0, 6], [5, 4, 7, 1, 8, 0, 2, 6, 3, 9], [7, 3, 0, 5, 6, 2, 4, 8, 9, 1], [3, 0, 7, 2, 8, 1, 9, 4, 6, 5], [1, 4, 3, 0, 7, 2, 8, 6, 5, 9], [2, 4, 5, 0, 7, 3, 9, 8, 1, 6], [5, 4, 7, 2, 0, 8, 1, 9, 3, 6], [4, 5, 7, 8, 1, 2, 0, 3, 6, 9], [4, 7, 5, 2, 3, 0, 8, 9, 1, 6], [0, 7, 2, 8, 3, 4, 5, 6, 1, 9], [4, 1, 5, 3, 7, 8, 2, 9, 0, 6], [4, 7, 0, 2, 5, 1, 3, 8, 6, 9], [2, 3, 0, 4, 8, 9, 7, 1, 5, 6], [7, 2, 8, 5, 4, 0, 1, 6, 3, 9], [1, 7, 2, 0, 3, 6, 8, 5, 9, 4], [0, 3, 7, 2, 6, 8, 1, 4, 5, 9], [7, 2, 0, 9, 8, 3, 5, 4, 1, 6], [7, 2, 5, 8, 6, 0, 3, 9, 4, 1], [2, 5, 7, 0, 3, 8, 9, 1, 4, 6], [5, 4, 7, 3, 2, 8, 6, 1, 9, 0], [1, 0, 8, 4, 7, 5, 2, 3, 9, 6], [2, 7, 0, 8, 6, 1, 5, 3, 4, 9], [7, 5, 0, 4, 2, 3, 1, 8, 6, 9], [7, 2, 5, 0, 8, 1, 9, 3, 6, 4], [2, 3, 0, 7, 8, 9, 6, 4, 5, 1], [5, 7, 2, 3, 8, 9, 6, 0, 1, 4], [7, 5, 4, 2, 0, 8, 1, 6, 3, 9], [6, 1, 4, 0, 7, 2, 5, 8, 9, 3], [1, 7, 5, 4, 8, 0, 3, 2, 6, 9], [7, 2, 3, 6, 8, 0, 9, 1, 5, 4], [7, 2, 0, 1, 8, 9, 6, 3, 4, 5], [0, 8, 4, 2, 7, 1, 3, 9, 6, 5], [7, 5, 2, 8, 4, 6, 3, 1, 0, 9], [4, 7, 1, 6, 0, 3, 2, 5, 8, 9], [7, 8, 6, 2, 9, 4, 3, 5, 1, 0], [0, 5, 3, 2, 7, 9, 8, 6, 4, 1], [1, 4, 6, 0, 7, 3, 2, 9, 8, 5], [6, 8, 0, 2, 5, 7, 9, 1, 3, 4], [7, 2, 0, 3, 8, 9, 5, 1, 6, 4], [4, 0, 3, 6, 9, 7, 8, 2, 5, 1], [6, 1, 7, 3, 2, 9, 8, 0, 4, 5], [2, 5, 0, 4, 1, 7, 3, 8, 6, 9], [7, 0, 2, 8, 5, 3, 9, 6, 1, 4], [3, 7, 0, 6, 1, 5, 9, 8, 2, 4], [5, 2, 7, 8, 0, 3, 6, 9, 4, 1], [7, 2, 4, 5, 0, 6, 1, 9, 8, 3], [6, 2, 8, 7, 1, 5, 4, 0, 3, 9], [7, 2, 5, 4, 0, 6, 3, 9, 8, 1], [4, 2, 7, 9, 1, 3, 0, 8, 5, 6], [1, 7, 8, 0, 2, 4, 5, 3, 6, 9], [1, 5, 0, 3, 6, 2, 8, 7, 9, 4], [7, 1, 5, 4, 0, 3, 2, 8, 6, 9], [7, 2, 1, 3, 6, 0, 8, 4, 9, 5], [4, 0, 7, 1, 8, 3, 2, 9, 5, 6], [4, 5, 0, 7, 1, 2, 8, 9, 3, 6], [5, 4, 7, 1, 0, 2, 8, 6, 3, 9], [4, 2, 7, 8, 3, 0, 9, 6, 5, 1], [5, 4, 2, 7, 8, 1, 3, 9, 0, 6], [3, 0, 9, 7, 2, 8, 6, 1, 4, 5], [7, 3, 2, 0, 9, 8, 6, 1, 5, 4], [2, 1, 0, 5, 8, 7, 6, 4, 3, 9], [7, 1, 8, 4, 5, 0, 3, 6, 2, 9], [2, 0, 8, 5, 6, 7, 4, 1, 3, 9], [7, 2, 8, 4, 5, 0, 1, 3, 6, 9], [1, 3, 0, 8, 2, 7, 9, 6, 5, 4], [1, 7, 4, 6, 2, 0, 8, 5, 3, 9], [1, 6, 8, 9, 2, 5, 7, 4, 0, 3], [2, 0, 3, 7, 5, 4, 6, 8, 9, 1], [4, 7, 1, 5, 3, 2, 9, 8, 0, 6], [6, 0, 1, 5, 8, 3, 2, 4, 9, 7], [9, 0, 6, 8, 7, 2, 3, 1, 4, 5], [3, 1, 7, 4, 5, 2, 8, 0, 9, 6], [5, 0, 4, 2, 8, 3, 6, 9, 1, 7], [7, 5, 2, 9, 3, 0, 6, 1, 8, 4], [3, 2, 7, 0, 8, 5, 6, 9, 1, 4], [7, 5, 0, 3, 2, 8, 6, 9, 
4, 1], [0, 2, 3, 8, 9, 1, 6, 7, 5, 4], [7, 0, 4, 3, 8, 5, 2, 1, 9, 6], [5, 4, 7, 2, 8, 3, 0, 6, 1, 9], [7, 1, 2, 8, 5, 3, 0, 6, 9, 4], [7, 5, 3, 1, 8, 9, 2, 0, 6, 4], [4, 5, 3, 8, 2, 0, 1, 6, 9, 7], [5, 0, 6, 7, 9, 2, 3, 8, 4, 1], [5, 7, 2, 8, 3, 4, 1, 0, 6, 9], [7, 8, 5, 4, 0, 1, 2, 3, 6, 9], [4, 5, 7, 2, 0, 3, 1, 8, 6, 9], [5, 2, 8, 7, 0, 9, 3, 4, 1, 6], [7, 2, 8, 5, 0, 4, 1, 6, 3, 9], [5, 4, 7, 8, 2, 1, 3, 0, 6, 9], [1, 0, 3, 8, 6, 2, 7, 4, 9, 5], [2, 3, 4, 7, 1, 5, 6, 8, 9, 0], [8, 7, 2, 1, 6, 3, 9, 0, 4, 5], [0, 1, 4, 5, 6, 7, 3, 2, 9, 8], [5, 7, 1, 8, 4, 0, 2, 3, 6, 9], [5, 7, 1, 0, 2, 8, 3, 6, 4, 9], [7, 8, 1, 4, 2, 3, 0, 6, 9, 5], [7, 1, 8, 2, 6, 3, 0, 5, 4, 9], [7, 4, 5, 0, 1, 2, 8, 6, 3, 9], [1, 5, 4, 2, 3, 6, 0, 7, 8, 9], [7, 3, 2, 5, 8, 6, 0, 9, 1, 4], [1, 2, 7, 3, 8, 0, 4, 6, 9, 5], [7, 4, 8, 5, 1, 2, 0, 9, 3, 6], [8, 7, 2, 6, 1, 9, 0, 5, 4, 3], [0, 4, 5, 7, 3, 8, 6, 9, 2, 1], [4, 7, 5, 2, 1, 8, 0, 3, 6, 9], [7, 2, 3, 8, 4, 0, 1, 6, 9, 5], [6, 0, 1, 5, 8, 7, 2, 9, 3, 4], [7, 1, 0, 2, 3, 6, 8, 5, 9, 4], [4, 7, 1, 5, 6, 9, 3, 2, 8, 0], [0, 2, 8, 7, 1, 3, 9, 6, 5, 4], [3, 1, 5, 7, 2, 4, 6, 8, 9, 0], [4, 7, 1, 5, 2, 3, 8, 6, 0, 9], [6, 9, 5, 8, 0, 3, 2, 7, 4, 1], [1, 7, 6, 0, 4, 8, 2, 5, 3, 9], [1, 8, 5, 0, 9, 4, 6, 2, 7, 3], [5, 7, 2, 6, 0, 1, 8, 9, 4, 3], [4, 3, 7, 2, 0, 8, 6, 9, 5, 1], [3, 4, 8, 5, 2, 7, 1, 9, 6, 0], [3, 8, 4, 7, 2, 5, 0, 1, 9, 6], [7, 0, 8, 2, 5, 3, 9, 6, 4, 1], [7, 1, 4, 5, 0, 2, 3, 8, 9, 6], [7, 0, 4, 5, 2, 8, 1, 3, 6, 9], [4, 2, 0, 3, 8, 5, 7, 9, 6, 1], [5, 2, 3, 0, 7, 6, 8, 1, 4, 9], [7, 5, 3, 0, 8, 4, 2, 1, 6, 9], [7, 4, 5, 2, 1, 3, 8, 9, 6, 0], [7, 5, 4, 2, 0, 3, 8, 9, 6, 1], [0, 3, 6, 1, 8, 7, 2, 5, 4, 9], [7, 2, 3, 0, 8, 1, 6, 5, 4, 9], [7, 4, 5, 0, 3, 2, 8, 1, 9, 6], [0, 6, 8, 2, 3, 7, 5, 4, 1, 9], [5, 0, 3, 6, 1, 8, 4, 9, 7, 2], [7, 5, 2, 1, 0, 4, 3, 8, 6, 9], [0, 9, 6, 1, 7, 8, 5, 2, 4, 3], [7, 2, 5, 4, 1, 0, 3, 8, 9, 6], [7, 2, 4, 1, 5, 0, 8, 6, 3, 9], [3, 2, 7, 8, 5, 0, 4, 9, 1, 6], [1, 3, 0, 4, 8, 2, 5, 7, 9, 6], [7, 5, 4, 2, 1, 8, 0, 9, 3, 6], [2, 3, 9, 8, 6, 1, 0, 7, 4, 5], [0, 1, 6, 9, 7, 2, 3, 8, 4, 5], [5, 7, 0, 8, 2, 3, 6, 9, 4, 1], [7, 3, 2, 4, 0, 5, 8, 1, 9, 6], [7, 2, 5, 4, 3, 6, 8, 1, 0, 9], [1, 8, 2, 7, 4, 5, 6, 0, 3, 9], [7, 8, 2, 3, 4, 0, 9, 5, 6, 1], [7, 2, 3, 8, 1, 9, 5, 0, 4, 6], [4, 6, 1, 5, 7, 3, 9, 8, 2, 0], [5, 4, 7, 2, 1, 3, 0, 6, 8, 9], [7, 0, 3, 5, 8, 1, 4, 2, 6, 9], [7, 6, 1, 3, 2, 9, 8, 5, 4, 0], [6, 0, 4, 9, 5, 1, 2, 8, 3, 7], [1, 2, 8, 4, 0, 7, 5, 3, 6, 9], [5, 2, 4, 0, 3, 6, 1, 9, 7, 8], [0, 4, 7, 5, 2, 6, 1, 9, 8, 3], [0, 4, 7, 3, 1, 9, 8, 2, 5, 6], [5, 2, 7, 1, 8, 9, 6, 4, 0, 3], [4, 5, 6, 7, 8, 1, 0, 2, 9, 3], [2, 7, 0, 6, 8, 3, 9, 4, 1, 5], [1, 7, 2, 5, 4, 6, 3, 8, 0, 9], [5, 4, 3, 7, 2, 8, 0, 1, 9, 6], [4, 0, 3, 1, 6, 2, 9, 8, 5, 7], [7, 2, 6, 1, 3, 4, 9, 8, 5, 0], [7, 4, 2, 0, 5, 1, 6, 8, 9, 3], [7, 8, 2, 0, 1, 4, 5, 6, 9, 3], [0, 5, 9, 4, 2, 8, 7, 3, 1, 6], [2, 7, 8, 9, 1, 0, 3, 6, 4, 5], [4, 7, 2, 1, 5, 0, 3, 8, 6, 9], [7, 5, 4, 2, 3, 8, 1, 0, 6, 9], [7, 5, 6, 2, 8, 9, 3, 1, 0, 4], [7, 2, 5, 1, 8, 9, 0, 6, 3, 4], [3, 5, 8, 2, 7, 9, 4, 6, 1, 0], [5, 0, 7, 1, 3, 2, 8, 6, 9, 4], [7, 4, 5, 1, 0, 2, 8, 3, 9, 6], [7, 0, 2, 5, 8, 9, 3, 6, 1, 4], [2, 9, 8, 7, 0, 6, 5, 1, 3, 4], [5, 4, 2, 0, 7, 8, 1, 3, 6, 9], [0, 1, 3, 8, 9, 7, 2, 6, 4, 5], [7, 1, 4, 0, 2, 5, 3, 8, 9, 6], [7, 2, 0, 8, 3, 5, 1, 9, 6, 4], [5, 7, 3, 1, 2, 0, 4, 6, 8, 9], [7, 5, 2, 3, 8, 0, 4, 1, 6, 9], [4, 7, 0, 2, 8, 3, 5, 1, 6, 9], [7, 5, 4, 1, 0, 8, 3, 2, 6, 9], [5, 4, 7, 0, 1, 2, 8, 3, 6, 9], [4, 7, 1, 2, 0, 3, 5, 8, 6, 9], [7, 4, 2, 0, 3, 5, 1, 8, 9, 
6], [2, 7, 8, 5, 6, 1, 9, 0, 3, 4], [7, 4, 2, 5, 1, 0, 3, 8, 6, 9], [7, 2, 6, 1, 9, 5, 0, 8, 3, 4], [7, 0, 5, 1, 3, 4, 2, 6, 8, 9], [4, 7, 1, 6, 8, 0, 5, 2, 3, 9], [2, 7, 8, 0, 6, 9, 1, 3, 4, 5], [1, 6, 7, 5, 2, 0, 8, 3, 9, 4], [8, 7, 3, 2, 0, 9, 6, 1, 5, 4], [7, 1, 4, 8, 2, 0, 3, 6, 9, 5], [7, 4, 2, 8, 0, 1, 6, 5, 3, 9], [8, 7, 2, 6, 0, 3, 9, 1, 4, 5], [5, 8, 7, 3, 4, 2, 0, 9, 1, 6], [1, 2, 4, 5, 7, 0, 3, 6, 9, 8], [5, 7, 1, 0, 2, 8, 6, 9, 3, 4], [0, 5, 4, 3, 2, 8, 7, 1, 9, 6], [4, 5, 2, 8, 7, 3, 0, 1, 6, 9], [7, 4, 2, 8, 5, 0, 1, 3, 9, 6], [7, 5, 4, 3, 1, 2, 8, 0, 6, 9], [7, 0, 4, 5, 1, 3, 2, 8, 9, 6], [7, 2, 3, 0, 5, 4, 8, 1, 6, 9], [7, 4, 3, 2, 1, 5, 0, 8, 6, 9], [7, 4, 5, 2, 0, 8, 3, 1, 9, 6], [5, 4, 7, 1, 0, 3, 9, 2, 8, 6], [7, 2, 0, 3, 8, 4, 9, 6, 1, 5], [0, 4, 3, 6, 7, 5, 2, 1, 8, 9], [1, 0, 2, 4, 5, 3, 7, 8, 6, 9], [0, 3, 8, 6, 4, 9, 2, 1, 7, 5], [1, 7, 5, 4, 6, 8, 2, 0, 9, 3], [0, 3, 1, 9, 5, 7, 8, 2, 6, 4], [7, 4, 2, 5, 6, 1, 8, 3, 0, 9], [1, 5, 6, 3, 0, 7, 2, 8, 9, 4], [7, 1, 5, 3, 4, 8, 6, 0, 2, 9], [7, 1, 6, 2, 0, 5, 8, 3, 9, 4], [7, 1, 2, 0, 8, 4, 5, 6, 3, 9], [5, 7, 2, 6, 1, 0, 3, 9, 4, 8], [0, 3, 2, 8, 7, 1, 9, 5, 6, 4], [7, 4, 5, 3, 2, 1, 8, 0, 6, 9], [7, 4, 1, 5, 2, 0, 3, 6, 8, 9], [7, 2, 0, 3, 8, 4, 5, 1, 6, 9], [4, 7, 1, 6, 5, 2, 3, 0, 8, 9], [2, 0, 7, 1, 8, 6, 9, 5, 4, 3], [0, 4, 5, 3, 2, 6, 8, 7, 9, 1], [5, 3, 6, 1, 8, 9, 2, 7, 0, 4], [8, 2, 7, 9, 0, 1, 4, 6, 3, 5], [8, 2, 7, 1, 4, 0, 9, 5, 3, 6], [0, 4, 2, 1, 6, 7, 8, 3, 9, 5], [0, 1, 7, 3, 4, 6, 5, 2, 8, 9], [4, 1, 7, 2, 6, 3, 8, 9, 5, 0], [7, 0, 2, 8, 4, 1, 6, 9, 5, 3], [3, 0, 7, 2, 8, 5, 4, 9, 1, 6], [0, 7, 3, 2, 6, 9, 8, 1, 4, 5], [5, 7, 1, 0, 4, 3, 6, 8, 2, 9], [0, 1, 8, 5, 4, 9, 6, 3, 7, 2], [4, 7, 2, 5, 0, 3, 8, 6, 1, 9], [6, 3, 0, 9, 5, 1, 7, 8, 4, 2], [7, 4, 5, 2, 1, 0, 6, 3, 8, 9], [7, 5, 0, 4, 2, 6, 8, 3, 1, 9], [1, 7, 0, 4, 3, 8, 5, 2, 6, 9], [3, 8, 2, 4, 5, 1, 7, 0, 6, 9], [2, 3, 6, 0, 8, 9, 1, 7, 4, 5], [0, 7, 4, 5, 1, 2, 8, 3, 6, 9], [2, 6, 0, 7, 9, 1, 8, 3, 5, 4], [7, 3, 8, 2, 1, 0, 4, 5, 9, 6], [7, 2, 8, 4, 1, 0, 6, 5, 3, 9], [5, 1, 6, 3, 7, 9, 2, 8, 0, 4], [7, 2, 3, 8, 6, 9, 0, 5, 1, 4], [0, 5, 2, 6, 3, 7, 8, 4, 1, 9], [4, 5, 0, 3, 2, 7, 9, 6, 8, 1], [7, 5, 8, 1, 6, 0, 9, 2, 4, 3], [7, 5, 4, 8, 1, 6, 2, 9, 0, 3], [1, 4, 3, 5, 0, 7, 8, 2, 6, 9], [7, 4, 0, 1, 6, 2, 5, 8, 3, 9], [5, 7, 1, 2, 8, 0, 6, 3, 9, 4], [1, 2, 7, 8, 0, 3, 6, 9, 4, 5], [7, 8, 2, 6, 1, 3, 0, 9, 5, 4], [1, 7, 4, 5, 2, 8, 3, 6, 0, 9], [0, 5, 8, 7, 1, 2, 4, 3, 9, 6], [7, 0, 2, 6, 3, 8, 9, 5, 4, 1], [7, 2, 1, 6, 8, 3, 5, 9, 0, 4], [4, 5, 7, 2, 0, 3, 9, 1, 8, 6], [1, 2, 7, 0, 3, 4, 8, 9, 5, 6], [4, 5, 7, 2, 3, 0, 8, 6, 1, 9], [3, 2, 7, 8, 1, 6, 0, 5, 4, 9], [1, 5, 3, 7, 8, 0, 2, 6, 9, 4], [2, 3, 1, 6, 5, 4, 7, 8, 0, 9], [5, 0, 2, 4, 7, 8, 3, 1, 9, 6], [7, 4, 5, 2, 1, 0, 3, 8, 9, 6], [4, 7, 2, 1, 0, 8, 5, 3, 9, 6], [5, 0, 7, 4, 6, 2, 9, 8, 1, 3], [7, 5, 0, 3, 2, 8, 1, 9, 6, 4], [4, 7, 5, 0, 2, 3, 8, 6, 1, 9], [0, 7, 2, 9, 5, 1, 8, 4, 3, 6], [4, 7, 1, 2, 8, 3, 5, 0, 6, 9], [5, 7, 8, 3, 2, 6, 1, 9, 0, 4], [0, 6, 9, 1, 3, 8, 2, 7, 5, 4], [2, 7, 8, 3, 4, 5, 6, 0, 1, 9], [7, 8, 2, 0, 1, 6, 9, 3, 5, 4], [4, 7, 1, 2, 8, 6, 9, 0, 5, 3], [7, 2, 5, 8, 1, 3, 0, 6, 9, 4], [3, 0, 4, 5, 7, 2, 8, 1, 9, 6], [7, 1, 0, 2, 5, 4, 8, 6, 9, 3], [7, 2, 5, 8, 3, 4, 0, 1, 6, 9], [5, 4, 1, 7, 2, 8, 3, 9, 6, 0], [5, 4, 2, 1, 6, 7, 8, 3, 0, 9], [5, 7, 8, 2, 9, 6, 1, 0, 3, 4], [7, 4, 2, 5, 1, 0, 8, 3, 9, 6], [7, 8, 2, 4, 1, 5, 9, 0, 3, 6], [1, 8, 7, 3, 2, 4, 6, 0, 5, 9], [5, 8, 9, 7, 2, 6, 1, 3, 0, 4], [7, 9, 2, 5, 0, 4, 6, 3, 8, 1], [8, 0, 4, 9, 3, 1, 2, 7, 6, 
5], [1, 4, 7, 5, 2, 6, 0, 3, 8, 9], [1, 5, 4, 0, 3, 6, 7, 2, 8, 9], [3, 6, 0, 9, 8, 1, 7, 2, 5, 4], [0, 8, 2, 1, 3, 4, 6, 7, 9, 5], [7, 5, 6, 4, 3, 0, 2, 8, 9, 1], [4, 7, 2, 5, 1, 8, 0, 3, 6, 9], [1, 4, 5, 7, 2, 0, 8, 6, 9, 3], [1, 7, 2, 4, 5, 0, 3, 9, 6, 8], [0, 1, 2, 8, 4, 3, 5, 9, 6, 7], [6, 7, 9, 5, 0, 1, 4, 3, 8, 2], [3, 5, 7, 1, 4, 0, 2, 8, 9, 6], [2, 7, 3, 8, 6, 9, 0, 1, 5, 4], [2, 5, 4, 7, 6, 3, 8, 0, 1, 9], [2, 0, 7, 3, 6, 4, 5, 1, 8, 9], [7, 3, 0, 4, 5, 2, 1, 6, 8, 9], [7, 5, 1, 4, 8, 2, 9, 6, 0, 3], [7, 4, 5, 1, 2, 3, 6, 0, 8, 9], [5, 0, 1, 7, 2, 3, 9, 8, 6, 4], [5, 2, 0, 8, 3, 7, 4, 9, 1, 6], [7, 2, 3, 8, 5, 0, 6, 1, 9, 4], [1, 2, 7, 6, 9, 8, 0, 5, 3, 4], [3, 7, 2, 8, 0, 1, 6, 9, 5, 4], [7, 5, 8, 4, 3, 9, 2, 6, 1, 0], [5, 0, 2, 1, 6, 8, 3, 7, 4, 9], [1, 4, 7, 5, 0, 6, 3, 2, 8, 9], [4, 1, 3, 5, 7, 6, 2, 0, 8, 9], [6, 7, 8, 4, 1, 0, 5, 2, 9, 3], [7, 4, 2, 0, 5, 3, 1, 8, 9, 6], [4, 7, 2, 1, 0, 8, 6, 5, 3, 9], [5, 6, 7, 9, 2, 8, 0, 3, 1, 4], [7, 2, 8, 5, 0, 3, 9, 6, 1, 4], [8, 7, 2, 5, 0, 3, 9, 4, 1, 6], [7, 4, 0, 5, 3, 8, 2, 1, 9, 6], [7, 4, 2, 8, 0, 3, 9, 1, 5, 6], [5, 6, 0, 3, 9, 7, 8, 2, 1, 4], [7, 1, 2, 0, 5, 6, 3, 8, 4, 9], [7, 2, 1, 0, 3, 5, 8, 6, 9, 4], [7, 4, 5, 2, 8, 1, 6, 3, 0, 9], [4, 1, 7, 5, 0, 3, 8, 2, 6, 9], [7, 6, 0, 2, 1, 5, 4, 8, 3, 9], [7, 5, 2, 1, 8, 6, 3, 0, 4, 9], [0, 7, 2, 3, 4, 1, 8, 6, 5, 9], [2, 4, 7, 3, 8, 5, 0, 6, 1, 9], [3, 1, 9, 0, 6, 8, 2, 5, 7, 4], [0, 7, 4, 2, 3, 5, 1, 8, 9, 6], [1, 7, 4, 3, 8, 5, 6, 9, 2, 0], [7, 3, 2, 0, 8, 6, 1, 4, 9, 5], [4, 7, 2, 1, 0, 3, 8, 9, 6, 5], [3, 7, 5, 4, 8, 1, 9, 2, 0, 6], [1, 0, 3, 5, 6, 7, 4, 2, 8, 9], [7, 4, 2, 0, 5, 1, 3, 6, 8, 9], [7, 2, 4, 3, 5, 1, 0, 6, 8, 9], [7, 2, 0, 8, 4, 3, 1, 5, 6, 9], [0, 7, 3, 6, 4, 1, 8, 2, 5, 9], [7, 1, 4, 5, 0, 6, 2, 3, 8, 9], [7, 5, 1, 4, 3, 2, 0, 8, 9, 6], [5, 7, 2, 3, 1, 9, 8, 6, 4, 0], [2, 1, 3, 0, 7, 4, 5, 9, 8, 6], [7, 2, 8, 3, 1, 6, 0, 9, 5, 4], [7, 2, 4, 1, 5, 8, 9, 0, 6, 3], [7, 2, 5, 8, 6, 0, 3, 1, 4, 9], [4, 7, 0, 2, 3, 1, 5, 8, 9, 6], [7, 4, 2, 5, 3, 1, 8, 0, 6, 9], [4, 7, 3, 0, 9, 2, 8, 1, 6, 5], [2, 7, 8, 3, 4, 5, 1, 9, 0, 6], [7, 2, 8, 4, 3, 0, 1, 5, 6, 9], [4, 7, 2, 0, 3, 5, 1, 8, 9, 6], [1, 5, 7, 2, 6, 8, 4, 3, 0, 9], [7, 2, 3, 0, 1, 4, 5, 8, 9, 6], [7, 8, 1, 3, 5, 2, 4, 9, 6, 0], [2, 8, 7, 0, 6, 5, 1, 4, 3, 9], [2, 7, 0, 8, 3, 9, 6, 4, 1, 5], [9, 2, 8, 7, 0, 3, 5, 6, 4, 1], [4, 1, 0, 8, 5, 2, 7, 9, 3, 6], [7, 1, 2, 0, 3, 8, 6, 4, 5, 9], [1, 8, 0, 9, 5, 6, 3, 7, 2, 4], [8, 3, 2, 0, 4, 7, 5, 1, 6, 9], [6, 8, 3, 2, 1, 7, 9, 0, 4, 5], [7, 2, 5, 4, 1, 0, 3, 6, 9, 8], [4, 7, 5, 0, 2, 3, 1, 8, 6, 9], [5, 2, 7, 4, 0, 6, 1, 8, 3, 9], [1, 7, 0, 2, 6, 4, 8, 9, 3, 5], [2, 9, 7, 0, 5, 8, 1, 3, 4, 6], [0, 4, 1, 5, 3, 6, 8, 2, 9, 7], [7, 4, 5, 1, 2, 3, 8, 6, 9, 0], [8, 7, 2, 1, 3, 9, 6, 4, 0, 5], [5, 7, 2, 3, 1, 0, 8, 9, 6, 4], [7, 2, 4, 3, 1, 5, 0, 8, 6, 9], [7, 2, 8, 3, 9, 0, 6, 1, 5, 4], [1, 2, 7, 0, 6, 3, 8, 9, 5, 4], [7, 4, 1, 2, 8, 3, 0, 5, 9, 6], [8, 2, 1, 0, 5, 7, 3, 6, 4, 9], [4, 5, 0, 7, 3, 2, 1, 8, 9, 6], [1, 2, 5, 8, 7, 9, 0, 4, 6, 3], [7, 2, 8, 0, 6, 9, 1, 3, 5, 4], [2, 7, 1, 0, 4, 5, 3, 8, 9, 6], [7, 4, 5, 0, 2, 3, 6, 1, 8, 9], [3, 7, 2, 5, 1, 4, 8, 6, 9, 0], [7, 2, 1, 3, 0, 5, 6, 8, 9, 4], [0, 5, 3, 2, 8, 7, 6, 9, 1, 4], [1, 0, 2, 5, 3, 7, 9, 8, 6, 4], [0, 1, 3, 6, 7, 5, 8, 2, 9, 4], [7, 1, 6, 2, 9, 5, 0, 4, 3, 8], [7, 2, 4, 5, 9, 8, 3, 1, 6, 0], [7, 0, 5, 1, 2, 8, 9, 6, 3, 4], [7, 0, 3, 2, 4, 1, 5, 6, 8, 9], [5, 0, 7, 2, 1, 8, 9, 4, 3, 6], [2, 5, 7, 8, 0, 4, 1, 6, 3, 9], [8, 2, 7, 4, 5, 3, 1, 0, 6, 9], [5, 7, 4, 0, 2, 8, 9, 1, 3, 6], [3, 1, 6, 9, 8, 2, 7, 4, 5, 
0], [0, 7, 1, 5, 4, 3, 6, 2, 8, 9], [7, 2, 8, 6, 3, 0, 1, 5, 4, 9], [4, 1, 6, 0, 2, 8, 9, 7, 5, 3], [5, 0, 7, 2, 6, 8, 3, 4, 9, 1], [7, 2, 5, 0, 4, 1, 3, 6, 8, 9], [7, 3, 9, 6, 2, 8, 1, 0, 5, 4], [1, 0, 3, 5, 7, 8, 4, 2, 6, 9], [0, 7, 5, 1, 2, 9, 8, 6, 3, 4], [7, 1, 2, 6, 3, 8, 5, 0, 9, 4], [0, 2, 7, 8, 9, 6, 5, 3, 1, 4], [7, 1, 3, 0, 2, 8, 4, 6, 5, 9], [5, 2, 0, 4, 6, 3, 9, 8, 7, 1], [8, 9, 7, 2, 3, 0, 6, 4, 5, 1], [1, 7, 4, 5, 3, 0, 2, 8, 6, 9], [5, 1, 0, 3, 6, 9, 4, 7, 8, 2], [6, 8, 2, 0, 7, 1, 9, 4, 5, 3], [8, 1, 5, 3, 9, 2, 7, 6, 4, 0], [7, 2, 1, 0, 5, 4, 6, 9, 3, 8], [4, 7, 2, 0, 3, 1, 5, 8, 6, 9], [7, 5, 8, 1, 4, 2, 0, 6, 3, 9], [7, 3, 4, 2, 0, 8, 9, 6, 5, 1], [4, 7, 0, 3, 5, 6, 1, 2, 8, 9], [7, 0, 9, 8, 1, 4, 5, 2, 3, 6], [6, 3, 0, 1, 9, 7, 2, 5, 4, 8], [7, 8, 0, 2, 3, 4, 1, 6, 9, 5], [0, 5, 2, 7, 8, 9, 3, 6, 1, 4], [7, 4, 0, 1, 5, 2, 8, 3, 9, 6], [5, 7, 1, 0, 2, 6, 8, 3, 9, 4], [7, 2, 0, 3, 8, 4, 5, 6, 9, 1], [7, 1, 5, 0, 2, 4, 6, 3, 8, 9], [7, 2, 8, 0, 6, 9, 3, 5, 1, 4], [7, 5, 0, 2, 6, 3, 4, 8, 9, 1], [7, 2, 8, 0, 3, 9, 1, 6, 5, 4], [3, 1, 5, 2, 7, 8, 4, 0, 9, 6], [7, 2, 6, 9, 8, 1, 3, 0, 5, 4], [7, 5, 2, 8, 0, 6, 3, 1, 9, 4], [7, 4, 5, 3, 2, 0, 1, 8, 9, 6], [1, 4, 2, 5, 7, 3, 0, 8, 9, 6], [7, 2, 8, 5, 4, 3, 0, 1, 6, 9], [2, 4, 8, 3, 7, 0, 6, 9, 1, 5], [7, 5, 4, 3, 0, 1, 8, 6, 9, 2], [7, 6, 2, 1, 8, 0, 4, 3, 9, 5], [7, 5, 1, 4, 0, 2, 6, 3, 8, 9], [0, 3, 6, 9, 8, 1, 4, 7, 5, 2], [7, 4, 2, 5, 3, 1, 6, 0, 8, 9], [6, 1, 9, 0, 3, 2, 7, 8, 5, 4], [0, 4, 7, 5, 2, 3, 6, 9, 1, 8], [7, 5, 0, 2, 1, 3, 6, 4, 8, 9], [4, 7, 5, 2, 0, 1, 8, 3, 6, 9], [2, 0, 7, 1, 6, 8, 9, 3, 4, 5], [7, 5, 1, 0, 2, 8, 6, 3, 9, 4], [7, 5, 2, 6, 3, 1, 0, 8, 9, 4], [4, 5, 2, 8, 9, 1, 3, 7, 0, 6], [5, 1, 4, 3, 9, 7, 8, 6, 2, 0], [7, 4, 0, 1, 5, 2, 6, 3, 8, 9], [7, 2, 1, 5, 0, 3, 4, 8, 6, 9], [4, 7, 1, 5, 0, 2, 8, 6, 3, 9], [2, 1, 0, 7, 4, 8, 6, 3, 9, 5], [0, 4, 1, 5, 3, 9, 8, 7, 6, 2], [0, 7, 2, 1, 8, 3, 6, 9, 4, 5], [5, 4, 7, 8, 3, 1, 0, 2, 9, 6], [0, 7, 2, 1, 3, 6, 8, 9, 5, 4], [7, 4, 2, 5, 3, 1, 6, 9, 8, 0], [7, 0, 2, 4, 3, 5, 1, 8, 9, 6], [1, 0, 4, 7, 8, 2, 6, 5, 3, 9], [0, 7, 2, 8, 9, 3, 4, 5, 1, 6], [7, 3, 2, 0, 6, 8, 9, 1, 5, 4], [6, 5, 0, 1, 9, 2, 7, 3, 4, 8], [7, 2, 8, 3, 5, 0, 9, 6, 4, 1], [2, 7, 1, 0, 4, 5, 3, 8, 6, 9], [0, 1, 9, 3, 8, 2, 6, 5, 4, 7], [7, 3, 1, 2, 8, 4, 9, 5, 0, 6], [7, 0, 5, 2, 8, 1, 4, 3, 6, 9], [4, 5, 7, 2, 8, 6, 9, 1, 0, 3], [7, 1, 4, 2, 5, 0, 3, 8, 6, 9], [4, 1, 8, 7, 5, 2, 0, 3, 9, 6], [7, 4, 5, 2, 8, 9, 6, 0, 1, 3], [1, 7, 2, 8, 6, 5, 3, 9, 4, 0], [7, 4, 0, 1, 2, 9, 6, 8, 3, 5], [4, 7, 5, 2, 6, 0, 8, 1, 9, 3], [7, 6, 5, 8, 2, 3, 4, 0, 9, 1], [4, 7, 1, 5, 2, 8, 3, 0, 9, 6], [7, 2, 5, 8, 1, 9, 0, 4, 3, 6], [0, 7, 3, 2, 6, 5, 9, 8, 1, 4], [7, 1, 2, 0, 6, 8, 4, 5, 3, 9], [7, 4, 5, 3, 6, 2, 0, 1, 8, 9], [5, 2, 7, 0, 4, 1, 8, 3, 9, 6], [3, 1, 6, 8, 2, 9, 0, 5, 4, 7], [4, 7, 1, 5, 8, 2, 0, 9, 6, 3], [6, 7, 1, 2, 9, 8, 0, 3, 5, 4], [0, 3, 1, 6, 9, 4, 8, 7, 2, 5], [7, 2, 9, 3, 1, 8, 0, 6, 4, 5], [3, 0, 6, 7, 9, 5, 2, 4, 8, 1], [7, 5, 4, 0, 3, 1, 8, 2, 9, 6], [0, 4, 2, 7, 3, 5, 1, 9, 8, 6], [7, 4, 3, 1, 0, 5, 9, 8, 2, 6], [7, 2, 0, 8, 3, 6, 9, 1, 5, 4], [4, 7, 0, 3, 8, 1, 5, 9, 2, 6], [4, 3, 5, 8, 9, 7, 2, 1, 6, 0], [3, 7, 5, 0, 4, 2, 6, 1, 8, 9], [0, 7, 2, 6, 9, 8, 3, 5, 4, 1], [7, 8, 2, 0, 6, 9, 1, 4, 5, 3], [0, 3, 1, 4, 7, 6, 2, 8, 5, 9], [7, 0, 2, 4, 8, 6, 3, 9, 5, 1], [4, 0, 7, 5, 2, 1, 8, 3, 6, 9], [0, 1, 4, 5, 7, 6, 9, 8, 3, 2], [7, 1, 2, 8, 0, 3, 9, 6, 5, 4], [7, 5, 3, 0, 8, 2, 1, 4, 6, 9], [0, 8, 7, 9, 4, 5, 2, 3, 1, 6], [3, 2, 6, 7, 9, 4, 0, 8, 1, 5], [7, 1, 8, 0, 5, 3, 2, 6, 4, 
9], [7, 4, 2, 5, 3, 1, 0, 9, 8, 6], [0, 4, 5, 8, 3, 6, 9, 7, 2, 1], [7, 5, 0, 9, 8, 2, 1, 3, 6, 4], [1, 7, 3, 6, 0, 4, 5, 8, 9, 2], [7, 1, 2, 0, 3, 4, 5, 8, 9, 6], [7, 5, 8, 2, 6, 3, 1, 0, 9, 4], [3, 0, 7, 2, 8, 1, 5, 9, 6, 4], [0, 5, 1, 7, 9, 6, 2, 3, 8, 4], [4, 0, 7, 1, 2, 8, 3, 9, 6, 5], [5, 6, 3, 0, 8, 7, 2, 9, 4, 1], [0, 7, 3, 4, 1, 6, 8, 2, 5, 9], [4, 7, 5, 2, 0, 8, 6, 3, 9, 1], [9, 0, 5, 4, 7, 1, 8, 2, 3, 6], [7, 2, 4, 8, 6, 0, 5, 1, 3, 9], [7, 5, 0, 6, 8, 4, 1, 2, 3, 9], [7, 2, 1, 4, 0, 8, 5, 3, 6, 9], [7, 8, 2, 5, 3, 0, 4, 6, 1, 9], [3, 0, 2, 1, 4, 7, 5, 8, 6, 9], [7, 2, 0, 5, 8, 1, 3, 9, 6, 4], [7, 5, 4, 1, 8, 2, 6, 0, 3, 9], [6, 1, 9, 0, 5, 3, 8, 7, 4, 2], [7, 4, 3, 2, 5, 0, 8, 1, 9, 6], [7, 4, 5, 1, 2, 0, 8, 3, 6, 9], [4, 1, 7, 2, 8, 5, 0, 3, 6, 9], [7, 5, 2, 4, 0, 6, 9, 8, 3, 1], [2, 1, 0, 5, 7, 8, 3, 9, 4, 6], [4, 1, 0, 2, 7, 6, 3, 9, 8, 5], [4, 5, 8, 7, 9, 2, 0, 3, 6, 1], [7, 8, 2, 0, 3, 1, 5, 9, 4, 6], [4, 7, 2, 5, 0, 8, 9, 3, 1, 6], [5, 6, 1, 7, 3, 0, 2, 8, 9, 4], [7, 0, 2, 4, 5, 1, 6, 8, 3, 9], [3, 0, 9, 4, 7, 2, 1, 6, 8, 5], [6, 1, 3, 9, 5, 0, 8, 4, 2, 7], [2, 7, 4, 5, 0, 3, 1, 8, 9, 6], [2, 0, 7, 3, 5, 1, 6, 8, 9, 4], [5, 7, 3, 6, 0, 4, 2, 8, 9, 1], [0, 2, 7, 8, 3, 6, 5, 4, 9, 1], [7, 4, 8, 5, 2, 3, 1, 6, 0, 9], [1, 7, 3, 0, 2, 8, 9, 5, 4, 6], [7, 0, 1, 3, 4, 2, 5, 8, 9, 6], [5, 7, 4, 2, 8, 6, 9, 0, 3, 1], [9, 1, 5, 0, 6, 4, 8, 3, 7, 2], [1, 4, 7, 8, 6, 5, 2, 3, 0, 9], [5, 7, 0, 1, 4, 9, 6, 8, 3, 2], [4, 0, 6, 3, 5, 1, 8, 2, 7, 9], [7, 8, 4, 2, 1, 5, 0, 3, 6, 9], [7, 1, 6, 3, 8, 9, 0, 5, 4, 2], [1, 3, 6, 0, 5, 2, 4, 7, 8, 9], [5, 4, 7, 2, 3, 1, 0, 8, 6, 9], [7, 5, 6, 3, 2, 9, 8, 1, 0, 4], [7, 2, 5, 1, 4, 8, 9, 0, 6, 3], [1, 2, 4, 7, 3, 5, 0, 8, 6, 9], [2, 7, 6, 1, 8, 0, 4, 5, 9, 3], [9, 2, 7, 5, 8, 6, 3, 4, 1, 0], [7, 2, 1, 5, 0, 8, 4, 9, 3, 6], [7, 4, 0, 3, 2, 1, 5, 8, 9, 6], [0, 7, 2, 8, 3, 9, 6, 1, 5, 4], [0, 5, 1, 7, 2, 6, 4, 8, 3, 9], [7, 2, 8, 5, 0, 1, 9, 3, 6, 4], [7, 2, 6, 8, 3, 1, 0, 5, 4, 9], [7, 2, 1, 3, 5, 6, 8, 0, 9, 4], [1, 0, 8, 9, 3, 6, 2, 7, 5, 4], [0, 1, 3, 5, 4, 8, 6, 9, 7, 2], [7, 1, 0, 3, 2, 5, 4, 8, 9, 6], [7, 0, 1, 2, 6, 3, 8, 4, 9, 5], [7, 2, 0, 3, 8, 1, 4, 5, 6, 9], [2, 7, 6, 8, 4, 9, 1, 5, 3, 0], [7, 4, 3, 8, 1, 5, 2, 9, 0, 6], [7, 1, 2, 8, 3, 0, 6, 5, 9, 4], [7, 2, 0, 4, 5, 3, 1, 8, 9, 6], [7, 5, 2, 3, 0, 4, 1, 9, 6, 8], [4, 7, 3, 1, 8, 6, 2, 0, 9, 5], [7, 4, 2, 8, 9, 5, 6, 1, 3, 0], [4, 5, 7, 3, 1, 0, 2, 8, 6, 9], [2, 6, 8, 5, 0, 4, 3, 7, 1, 9], [7, 8, 2, 5, 3, 4, 6, 1, 0, 9], [2, 7, 4, 5, 1, 8, 3, 9, 6, 0], [4, 7, 1, 2, 0, 5, 3, 8, 6, 9], [0, 4, 5, 1, 7, 2, 3, 9, 8, 6], [3, 9, 0, 8, 1, 2, 6, 7, 5, 4], [6, 0, 2, 7, 3, 8, 5, 9, 1, 4], [6, 0, 3, 1, 9, 8, 2, 7, 4, 5], [9, 7, 2, 8, 3, 1, 6, 4, 0, 5], [1, 9, 6, 5, 0, 3, 4, 8, 2, 7], [5, 7, 4, 6, 2, 1, 3, 8, 0, 9], [0, 5, 6, 3, 7, 8, 2, 4, 1, 9], [7, 4, 5, 1, 2, 8, 0, 9, 6, 3], [7, 2, 3, 9, 0, 1, 8, 5, 6, 4], [7, 8, 5, 2, 0, 3, 4, 1, 9, 6], [8, 1, 6, 0, 7, 2, 3, 5, 9, 4], [7, 2, 8, 0, 5, 4, 3, 1, 9, 6], [4, 7, 1, 2, 5, 9, 6, 0, 3, 8], [1, 7, 6, 2, 3, 8, 5, 4, 0, 9], [4, 7, 2, 5, 0, 8, 1, 3, 6, 9], [2, 4, 0, 6, 1, 8, 7, 9, 5, 3], [0, 7, 4, 2, 5, 3, 8, 1, 9, 6], [4, 1, 0, 3, 9, 7, 8, 2, 6, 5], [4, 5, 7, 8, 0, 2, 1, 6, 9, 3], [5, 7, 6, 0, 2, 3, 8, 1, 9, 4], [4, 8, 1, 7, 2, 3, 5, 0, 9, 6], [7, 2, 0, 3, 8, 4, 9, 6, 5, 1], [7, 3, 4, 1, 0, 5, 2, 6, 8, 9], [7, 5, 2, 8, 3, 6, 1, 0, 9, 4], [1, 2, 7, 5, 0, 4, 3, 8, 9, 6], [7, 2, 4, 0, 6, 8, 5, 9, 1, 3], [7, 2, 8, 3, 1, 4, 9, 6, 0, 5], [2, 5, 6, 3, 8, 7, 0, 9, 1, 4], [8, 2, 7, 3, 1, 6, 5, 4, 0, 9], [2, 3, 0, 8, 7, 6, 9, 1, 5, 4], [5, 9, 2, 8, 7, 0, 1, 3, 6, 
4], [7, 8, 2, 0, 3, 9, 6, 5, 1, 4], [1, 0, 7, 2, 3, 8, 6, 9, 4, 5], [5, 7, 2, 8, 1, 3, 4, 0, 9, 6], [7, 2, 3, 6, 8, 0, 9, 1, 5, 4], [2, 7, 1, 8, 3, 6, 9, 4, 5, 0], [2, 3, 0, 6, 1, 5, 8, 4, 7, 9], [5, 7, 2, 8, 9, 0, 3, 6, 4, 1], [7, 1, 2, 8, 4, 5, 6, 0, 9, 3], [6, 3, 0, 9, 1, 2, 8, 7, 5, 4], [5, 0, 1, 4, 3, 2, 7, 8, 9, 6], [8, 7, 5, 4, 2, 0, 1, 3, 9, 6], [4, 5, 7, 0, 2, 1, 8, 3, 6, 9], [7, 6, 1, 8, 2, 3, 9, 0, 5, 4], [0, 9, 7, 1, 2, 4, 5, 8, 6, 3], [6, 0, 9, 3, 2, 8, 7, 5, 1, 4], [4, 5, 3, 7, 0, 6, 2, 8, 9, 1], [6, 8, 7, 2, 9, 0, 1, 3, 5, 4], [1, 7, 2, 8, 5, 6, 4, 9, 0, 3], [4, 0, 5, 1, 3, 7, 2, 9, 8, 6], [2, 7, 1, 8, 6, 0, 3, 9, 5, 4], [0, 4, 5, 2, 3, 8, 6, 7, 9, 1], [7, 5, 9, 8, 1, 6, 0, 2, 4, 3], [7, 0, 5, 2, 8, 1, 4, 6, 3, 9], [7, 0, 2, 1, 5, 4, 8, 3, 6, 9], [4, 5, 1, 7, 6, 8, 3, 2, 0, 9], [1, 7, 3, 4, 5, 0, 2, 6, 8, 9], [6, 4, 7, 5, 0, 2, 1, 8, 3, 9], [7, 4, 2, 1, 8, 5, 0, 3, 9, 6], [3, 6, 1, 8, 0, 9, 5, 4, 7, 2], [5, 3, 2, 4, 7, 0, 8, 6, 9, 1], [7, 4, 3, 1, 0, 2, 8, 5, 9, 6], [7, 4, 1, 2, 3, 0, 8, 5, 6, 9], [7, 2, 5, 0, 8, 6, 9, 1, 3, 4], [7, 5, 1, 8, 0, 4, 6, 2, 3, 9], [7, 5, 2, 8, 4, 0, 3, 1, 9, 6], [4, 1, 2, 7, 8, 0, 6, 3, 5, 9], [2, 3, 4, 5, 8, 9, 6, 0, 7, 1], [4, 5, 0, 1, 3, 7, 8, 2, 9, 6], [1, 6, 0, 7, 8, 5, 4, 2, 3, 9], [7, 4, 5, 0, 2, 1, 6, 3, 8, 9], [8, 7, 4, 1, 5, 2, 0, 3, 6, 9], [7, 5, 4, 2, 3, 8, 1, 0, 6, 9], [4, 1, 2, 0, 8, 6, 3, 7, 9, 5], [7, 5, 2, 8, 9, 1, 6, 0, 3, 4], [5, 0, 7, 2, 8, 3, 4, 6, 1, 9], [8, 2, 3, 4, 5, 0, 6, 1, 9, 7], [7, 3, 0, 6, 4, 9, 2, 1, 8, 5], [5, 2, 7, 8, 0, 9, 6, 3, 4, 1], [7, 4, 5, 3, 2, 0, 1, 6, 8, 9], [2, 1, 0, 7, 5, 3, 6, 8, 9, 4], [2, 5, 0, 7, 8, 1, 9, 6, 4, 3], [7, 4, 1, 3, 8, 0, 2, 6, 9, 5], [5, 3, 0, 7, 2, 4, 1, 8, 6, 9], [2, 0, 3, 8, 4, 9, 5, 7, 1, 6], [5, 4, 1, 0, 3, 2, 7, 8, 9, 6], [4, 7, 1, 5, 0, 8, 2, 6, 9, 3], [1, 7, 5, 0, 2, 6, 3, 4, 8, 9], [4, 7, 5, 1, 8, 2, 3, 9, 6, 0], [4, 7, 5, 9, 2, 3, 1, 0, 6, 8], [5, 7, 2, 0, 8, 3, 9, 1, 6, 4], [2, 3, 6, 0, 8, 9, 7, 1, 5, 4], [7, 5, 4, 0, 3, 2, 8, 1, 6, 9], [5, 7, 2, 3, 0, 8, 1, 9, 6, 4], [5, 7, 1, 3, 2, 8, 9, 0, 6, 4], [0, 9, 5, 4, 7, 8, 1, 2, 6, 3], [7, 4, 1, 5, 8, 2, 0, 3, 6, 9], [6, 1, 0, 4, 3, 8, 2, 5, 7, 9], [4, 7, 1, 5, 2, 8, 3, 6, 0, 9], [5, 0, 1, 3, 6, 9, 7, 2, 8, 4], [3, 1, 0, 9, 6, 5, 8, 7, 2, 4], [4, 3, 1, 9, 5, 6, 7, 2, 8, 0], [1, 4, 7, 2, 0, 5, 3, 9, 8, 6], [5, 7, 0, 2, 6, 3, 1, 8, 9, 4], [7, 5, 4, 2, 1, 8, 9, 3, 0, 6], [7, 2, 5, 0, 8, 3, 6, 9, 1, 4], [4, 7, 0, 2, 8, 6, 5, 9, 3, 1], [7, 4, 3, 8, 2, 6, 0, 9, 1, 5], [7, 8, 4, 2, 6, 0, 3, 5, 1, 9], [7, 5, 2, 3, 0, 8, 1, 4, 9, 6], [4, 3, 0, 7, 9, 2, 8, 1, 6, 5], [4, 5, 1, 7, 2, 3, 8, 6, 9, 0], [7, 2, 1, 0, 8, 6, 3, 5, 9, 4], [4, 7, 5, 0, 8, 6, 2, 3, 9, 1], [1, 4, 5, 7, 2, 8, 3, 0, 6, 9], [7, 5, 0, 4, 3, 1, 8, 2, 6, 9], [7, 2, 5, 0, 8, 4, 3, 9, 6, 1], [1, 7, 2, 6, 4, 0, 8, 9, 5, 3], [1, 7, 5, 6, 2, 4, 8, 3, 0, 9], [4, 7, 6, 0, 5, 8, 9, 1, 2, 3], [6, 1, 7, 2, 8, 0, 5, 9, 3, 4], [7, 0, 3, 5, 4, 2, 8, 6, 9, 1], [1, 7, 4, 5, 0, 2, 9, 3, 6, 8], [7, 4, 5, 2, 1, 3, 0, 8, 9, 6], [7, 0, 2, 3, 5, 8, 1, 9, 6, 4], [2, 5, 9, 1, 3, 8, 6, 0, 4, 7], [7, 4, 3, 2, 9, 5, 8, 1, 0, 6], [7, 4, 5, 3, 8, 1, 2, 9, 0, 6], [5, 3, 6, 0, 2, 9, 7, 8, 1, 4], [4, 7, 1, 2, 5, 0, 8, 6, 3, 9], [0, 3, 6, 9, 4, 5, 7, 2, 8, 1], [3, 8, 5, 7, 2, 9, 4, 0, 6, 1], [7, 2, 8, 0, 6, 3, 9, 1, 4, 5], [0, 7, 2, 8, 6, 1, 3, 9, 5, 4], [3, 0, 2, 7, 1, 9, 8, 6, 5, 4], [2, 7, 0, 8, 6, 3, 1, 9, 5, 4], [8, 2, 1, 7, 6, 4, 5, 9, 0, 3], [7, 8, 1, 3, 5, 2, 0, 4, 9, 6], [0, 3, 6, 9, 2, 7, 4, 5, 1, 8], [7, 4, 2, 0, 5, 1, 3, 9, 8, 6], [7, 2, 4, 3, 5, 8, 0, 6, 1, 9], [7, 5, 2, 8, 0, 6, 3, 9, 1, 
4], [8, 7, 2, 1, 4, 5, 0, 9, 6, 3], [7, 2, 8, 6, 0, 9, 4, 5, 3, 1], [2, 8, 6, 3, 7, 9, 5, 4, 0, 1], [7, 5, 6, 2, 3, 0, 8, 4, 1, 9], [4, 7, 2, 5, 0, 3, 8, 9, 6, 1], [1, 2, 7, 6, 4, 0, 8, 5, 3, 9], [5, 2, 7, 8, 0, 6, 3, 1, 4, 9], [5, 0, 2, 1, 7, 6, 8, 3, 4, 9], [1, 7, 0, 2, 4, 8, 3, 6, 9, 5], [7, 2, 3, 5, 8, 0, 6, 1, 9, 4], [4, 2, 7, 3, 6, 0, 5, 1, 8, 9], [4, 8, 1, 0, 2, 5, 7, 6, 9, 3], [5, 7, 2, 9, 8, 0, 6, 3, 4, 1], [7, 4, 5, 0, 2, 3, 1, 6, 8, 9], [7, 3, 1, 0, 9, 2, 8, 6, 5, 4], [5, 1, 4, 6, 8, 0, 7, 9, 3, 2], [0, 3, 6, 9, 2, 8, 7, 4, 5, 1], [2, 8, 9, 0, 6, 7, 3, 4, 1, 5], [7, 1, 0, 4, 8, 3, 2, 6, 5, 9], [0, 4, 1, 7, 3, 2, 5, 8, 6, 9], [0, 5, 4, 1, 6, 9, 3, 8, 2, 7], [7, 5, 2, 8, 0, 4, 1, 9, 3, 6], [7, 2, 0, 1, 6, 3, 8, 9, 4, 5], [7, 3, 0, 9, 5, 2, 8, 6, 1, 4], [9, 6, 0, 3, 8, 1, 5, 2, 4, 7], [0, 2, 8, 7, 3, 6, 9, 1, 5, 4], [5, 6, 3, 2, 9, 7, 8, 0, 1, 4], [2, 0, 5, 1, 7, 8, 3, 4, 9, 6], [7, 1, 4, 5, 6, 0, 2, 8, 3, 9], [0, 7, 4, 2, 5, 3, 8, 1, 6, 9], [7, 2, 4, 5, 0, 8, 3, 1, 6, 9], [4, 5, 1, 0, 7, 2, 3, 9, 8, 6], [4, 5, 7, 3, 0, 8, 2, 9, 1, 6], [7, 4, 5, 1, 0, 3, 6, 2, 8, 9], [4, 7, 5, 2, 3, 0, 8, 9, 6, 1], [0, 7, 6, 2, 1, 3, 8, 9, 5, 4], [0, 6, 1, 3, 7, 4, 5, 2, 8, 9], [7, 0, 5, 4, 1, 2, 8, 9, 6, 3], [0, 3, 9, 6, 8, 1, 5, 4, 7, 2], [4, 5, 3, 2, 7, 9, 8, 0, 1, 6], [5, 8, 1, 0, 3, 2, 6, 7, 9, 4], [7, 1, 5, 0, 2, 4, 8, 3, 6, 9], [7, 4, 5, 1, 2, 0, 8, 9, 6, 3], [5, 3, 6, 4, 7, 0, 1, 9, 2, 8], [0, 1, 7, 6, 2, 8, 4, 5, 3, 9], [0, 3, 8, 7, 6, 9, 2, 5, 1, 4], [7, 0, 9, 2, 5, 8, 3, 1, 4, 6], [6, 7, 3, 8, 2, 4, 0, 5, 9, 1], [5, 7, 1, 2, 3, 6, 8, 0, 4, 9], [7, 4, 5, 2, 8, 0, 1, 9, 3, 6], [7, 4, 5, 2, 8, 3, 1, 0, 6, 9], [1, 4, 8, 9, 2, 5, 6, 7, 0, 3], [7, 0, 6, 2, 1, 3, 8, 5, 9, 4], [4, 9, 1, 5, 3, 7, 0, 2, 8, 6], [3, 0, 6, 9, 1, 2, 7, 8, 5, 4], [4, 5, 7, 8, 2, 0, 1, 3, 6, 9], [1, 9, 6, 3, 8, 4, 5, 0, 2, 7], [5, 2, 7, 9, 8, 6, 0, 1, 3, 4], [7, 4, 5, 9, 6, 2, 8, 0, 3, 1], [5, 6, 0, 9, 4, 2, 7, 3, 1, 8], [0, 1, 7, 2, 6, 5, 8, 3, 9, 4], [4, 7, 3, 1, 5, 8, 2, 0, 9, 6], [7, 2, 8, 1, 0, 5, 6, 4, 3, 9], [7, 8, 6, 1, 9, 2, 3, 5, 4, 0], [3, 8, 2, 0, 7, 1, 9, 5, 4, 6], [4, 1, 7, 2, 0, 5, 3, 8, 9, 6], [7, 2, 1, 8, 3, 6, 0, 5, 9, 4], [7, 3, 2, 8, 1, 0, 9, 5, 6, 4], [7, 5, 4, 2, 0, 1, 3, 6, 8, 9], [7, 5, 8, 4, 0, 6, 1, 2, 9, 3], [7, 2, 0, 5, 8, 4, 6, 3, 9, 1], [0, 5, 4, 3, 7, 1, 2, 8, 9, 6], [7, 1, 4, 5, 3, 9, 6, 0, 8, 2], [0, 7, 3, 2, 9, 6, 8, 1, 5, 4], [1, 0, 2, 7, 8, 4, 3, 6, 9, 5], [7, 1, 2, 0, 3, 5, 4, 8, 9, 6], [4, 5, 7, 1, 2, 9, 6, 3, 0, 8], [7, 5, 8, 2, 0, 1, 6, 3, 9, 4], [4, 5, 7, 2, 3, 0, 6, 1, 8, 9], [0, 4, 5, 3, 2, 1, 6, 8, 9, 7], [7, 1, 8, 0, 5, 4, 3, 2, 6, 9], [7, 0, 2, 3, 5, 8, 1, 6, 9, 4], [7, 2, 5, 3, 0, 6, 8, 9, 1, 4], [4, 2, 1, 7, 3, 0, 9, 8, 6, 5], [7, 3, 0, 2, 8, 6, 1, 5, 4, 9], [7, 1, 2, 5, 8, 0, 3, 9, 6, 4], [7, 0, 1, 2, 6, 5, 3, 8, 9, 4], [7, 2, 1, 5, 8, 3, 6, 9, 0, 4], [7, 2, 1, 4, 5, 3, 0, 6, 8, 9], [7, 4, 5, 2, 1, 0, 8, 3, 6, 9], [5, 7, 3, 0, 6, 8, 2, 4, 9, 1], [4, 3, 5, 2, 7, 1, 8, 0, 9, 6], [2, 0, 5, 3, 4, 9, 6, 1, 7, 8], [4, 1, 7, 0, 6, 2, 8, 9, 5, 3], [3, 1, 0, 8, 7, 2, 6, 9, 5, 4], [7, 4, 2, 9, 8, 3, 5, 0, 6, 1], [6, 0, 7, 3, 1, 4, 8, 9, 5, 2], [1, 3, 0, 8, 9, 2, 6, 7, 4, 5], [4, 1, 5, 0, 2, 7, 9, 8, 3, 6], [7, 2, 8, 4, 0, 3, 5, 1, 6, 9], [0, 6, 1, 3, 4, 2, 9, 7, 8, 5], [7, 4, 6, 8, 1, 2, 0, 3, 5, 9], [4, 7, 5, 0, 2, 3, 1, 6, 8, 9], [7, 4, 5, 3, 2, 1, 0, 8, 6, 9], [5, 7, 1, 0, 8, 3, 6, 2, 4, 9], [7, 2, 1, 5, 4, 6, 0, 9, 8, 3], [6, 0, 1, 9, 3, 5, 2, 7, 8, 4], [6, 9, 7, 3, 0, 2, 8, 4, 5, 1], [5, 7, 0, 1, 3, 4, 9, 8, 2, 6], [7, 8, 9, 1, 2, 4, 0, 3, 6, 5], [0, 2, 3, 8, 1, 6, 9, 5, 7, 
4], [6, 1, 3, 7, 0, 2, 8, 9, 4, 5], [8, 7, 2, 5, 4, 0, 3, 6, 1, 9], [6, 7, 2, 3, 8, 0, 9, 1, 4, 5], [4, 0, 8, 7, 2, 9, 3, 5, 1, 6], [7, 1, 0, 3, 2, 4, 8, 6, 5, 9], [3, 1, 7, 0, 8, 4, 2, 6, 5, 9], [0, 1, 5, 3, 6, 9, 2, 8, 7, 4], [7, 6, 8, 2, 0, 9, 3, 1, 4, 5], [7, 4, 5, 2, 6, 1, 0, 8, 3, 9], [7, 5, 2, 4, 0, 1, 3, 8, 9, 6], [4, 5, 7, 2, 3, 8, 0, 1, 6, 9], [7, 8, 2, 6, 1, 9, 3, 5, 4, 0], [1, 0, 6, 3, 5, 7, 2, 4, 8, 9], [7, 5, 0, 6, 2, 8, 1, 3, 9, 4], [0, 5, 1, 7, 4, 8, 9, 2, 6, 3], [7, 1, 0, 8, 2, 9, 6, 3, 4, 5], [7, 4, 2, 8, 5, 3, 9, 1, 0, 6], [3, 7, 5, 2, 0, 9, 8, 1, 6, 4], [7, 2, 4, 8, 5, 1, 0, 6, 3, 9], [5, 7, 3, 9, 0, 2, 4, 8, 6, 1], [0, 6, 1, 3, 9, 2, 7, 8, 5, 4], [1, 7, 5, 4, 0, 2, 8, 6, 9, 3], [5, 7, 2, 0, 4, 8, 9, 6, 3, 1], [7, 4, 1, 5, 8, 2, 3, 0, 6, 9], [5, 0, 2, 7, 1, 6, 3, 8, 9, 4], [7, 1, 2, 6, 8, 0, 9, 5, 4, 3], [6, 2, 1, 7, 8, 9, 4, 0, 3, 5], [1, 0, 7, 4, 8, 2, 6, 3, 5, 9], [2, 4, 0, 1, 7, 5, 8, 6, 3, 9], [4, 1, 7, 2, 5, 8, 3, 9, 0, 6], [3, 1, 5, 7, 2, 8, 9, 0, 4, 6], [1, 5, 4, 7, 2, 8, 3, 6, 9, 0], [5, 1, 8, 4, 7, 2, 6, 9, 0, 3], [4, 7, 5, 1, 2, 0, 3, 8, 6, 9], [7, 1, 5, 6, 2, 8, 3, 9, 4, 0], [4, 2, 3, 5, 6, 9, 1, 8, 7, 0], [7, 1, 0, 8, 2, 6, 3, 5, 4, 9], [7, 2, 4, 1, 5, 8, 0, 3, 9, 6], [6, 1, 3, 8, 2, 5, 0, 7, 4, 9], [7, 1, 5, 6, 4, 2, 0, 8, 3, 9], [7, 0, 3, 6, 9, 1, 8, 2, 4, 5], [4, 5, 0, 7, 2, 3, 1, 8, 9, 6], [7, 5, 8, 2, 0, 9, 1, 6, 4, 3], [7, 5, 2, 8, 6, 1, 4, 0, 3, 9], [4, 5, 1, 7, 2, 3, 0, 6, 8, 9], [7, 1, 0, 6, 8, 9, 2, 4, 5, 3], [2, 4, 1, 0, 3, 7, 8, 6, 9, 5], [5, 2, 6, 8, 7, 1, 9, 0, 3, 4], [7, 8, 6, 5, 1, 2, 3, 0, 4, 9], [2, 7, 5, 3, 8, 9, 0, 6, 1, 4], [4, 7, 1, 5, 3, 0, 8, 2, 6, 9], [5, 4, 1, 2, 3, 7, 0, 8, 9, 6], [7, 2, 6, 3, 0, 4, 8, 5, 1, 9], [7, 1, 4, 0, 5, 8, 2, 3, 9, 6], [6, 0, 9, 3, 8, 1, 5, 4, 2, 7], [5, 1, 4, 7, 2, 0, 8, 3, 6, 9], [2, 7, 8, 4, 0, 3, 9, 5, 1, 6], [5, 2, 7, 0, 6, 8, 3, 9, 4, 1], [3, 1, 0, 8, 2, 7, 6, 9, 5, 4], [5, 2, 6, 3, 0, 4, 7, 1, 8, 9], [2, 8, 7, 1, 0, 5, 4, 6, 3, 9], [7, 8, 1, 3, 0, 2, 6, 5, 4, 9], [4, 1, 7, 5, 8, 3, 2, 0, 6, 9], [7, 2, 1, 4, 5, 3, 8, 6, 9, 0], [0, 3, 6, 7, 1, 9, 8, 2, 5, 4], [0, 1, 5, 7, 3, 4, 9, 8, 2, 6], [1, 5, 0, 7, 2, 4, 3, 8, 9, 6], [0, 6, 2, 5, 7, 4, 8, 3, 9, 1], [5, 2, 7, 6, 8, 0, 9, 1, 4, 3], [1, 3, 7, 0, 2, 5, 6, 8, 4, 9], [1, 8, 2, 5, 7, 0, 4, 3, 6, 9], [4, 7, 8, 1, 0, 3, 2, 5, 6, 9], [7, 0, 8, 5, 1, 4, 2, 3, 6, 9], [7, 2, 8, 0, 1, 5, 4, 6, 9, 3], [2, 3, 8, 0, 9, 6, 1, 5, 7, 4], [7, 5, 3, 4, 1, 8, 2, 0, 6, 9], [2, 1, 3, 8, 0, 4, 7, 9, 5, 6], [6, 9, 1, 5, 0, 4, 3, 8, 2, 7], [7, 1, 2, 4, 5, 3, 6, 8, 0, 9], [7, 3, 4, 0, 9, 5, 2, 6, 8, 1], [2, 8, 3, 7, 0, 1, 9, 6, 5, 4], [2, 7, 4, 8, 0, 6, 3, 1, 5, 9], [4, 7, 8, 0, 2, 5, 3, 1, 6, 9], [7, 5, 4, 1, 0, 2, 8, 9, 6, 3], [1, 4, 5, 7, 2, 8, 0, 3, 6, 9], [4, 5, 7, 2, 8, 0, 9, 3, 6, 1], [4, 5, 2, 3, 8, 9, 7, 6, 1, 0], [5, 4, 0, 3, 7, 1, 2, 8, 9, 6], [7, 5, 6, 2, 0, 3, 9, 8, 4, 1], [7, 5, 1, 2, 3, 6, 0, 4, 8, 9], [5, 7, 6, 2, 8, 9, 0, 1, 4, 3], [7, 0, 4, 6, 2, 8, 1, 9, 5, 3], [4, 0, 7, 8, 9, 2, 3, 5, 1, 6], [5, 1, 7, 6, 0, 4, 2, 9, 3, 8], [5, 0, 7, 3, 2, 4, 1, 8, 6, 9], [0, 3, 1, 9, 2, 6, 7, 8, 4, 5], [1, 7, 5, 4, 2, 0, 8, 3, 6, 9], [5, 7, 8, 1, 2, 3, 0, 4, 6, 9], [0, 7, 8, 3, 2, 9, 6, 4, 5, 1], [4, 7, 5, 2, 1, 3, 8, 0, 6, 9], [1, 7, 8, 0, 2, 6, 9, 3, 4, 5], [6, 5, 1, 2, 0, 8, 4, 7, 9, 3], [1, 7, 2, 8, 3, 0, 6, 5, 9, 4], [2, 1, 4, 7, 3, 0, 8, 5, 9, 6], [1, 4, 5, 7, 2, 0, 8, 6, 9, 3], [5, 1, 7, 6, 3, 0, 4, 8, 2, 9], [4, 1, 7, 0, 8, 5, 6, 2, 9, 3], [5, 2, 3, 7, 0, 8, 9, 6, 4, 1], [0, 2, 7, 3, 8, 6, 9, 5, 4, 1], [3, 6, 5, 8, 1, 7, 2, 0, 4, 9], [4, 5, 7, 6, 0, 2, 8, 3, 9, 
1], [2, 7, 6, 4, 8, 1, 3, 5, 0, 9], [7, 4, 5, 1, 3, 8, 0, 2, 6, 9], [4, 7, 2, 0, 3, 5, 1, 8, 6, 9], [6, 0, 1, 2, 7, 8, 9, 4, 3, 5], [4, 7, 5, 0, 3, 8, 2, 6, 9, 1], [0, 3, 9, 6, 1, 2, 7, 8, 4, 5], [7, 2, 8, 1, 6, 3, 0, 9, 4, 5], [1, 4, 3, 5, 0, 7, 8, 6, 2, 9], [2, 7, 5, 4, 8, 1, 6, 0, 9, 3], [7, 4, 2, 5, 3, 8, 1, 6, 0, 9], [4, 7, 3, 2, 8, 9, 5, 0, 6, 1], [6, 3, 9, 4, 0, 8, 7, 1, 5, 2], [4, 7, 0, 5, 2, 1, 8, 3, 6, 9], [1, 7, 6, 3, 5, 4, 8, 2, 0, 9], [5, 7, 2, 8, 4, 0, 9, 3, 1, 6], [7, 2, 4, 8, 1, 0, 5, 3, 6, 9], [8, 7, 4, 0, 5, 6, 1, 2, 3, 9], [3, 7, 2, 1, 6, 8, 9, 4, 5, 0], [0, 5, 3, 4, 1, 7, 8, 2, 6, 9], [7, 4, 2, 3, 8, 0, 5, 9, 6, 1], [7, 2, 8, 0, 3, 1, 5, 4, 6, 9], [1, 5, 4, 0, 7, 3, 2, 8, 6, 9], [2, 4, 5, 7, 1, 0, 8, 3, 6, 9], [1, 0, 7, 8, 3, 2, 5, 4, 6, 9], [1, 7, 4, 6, 5, 3, 8, 2, 9, 0], [3, 7, 8, 2, 4, 5, 0, 9, 1, 6], [7, 0, 1, 4, 5, 3, 2, 6, 8, 9], [7, 1, 5, 2, 6, 8, 3, 4, 9, 0], [2, 7, 1, 0, 6, 8, 9, 3, 5, 4], [5, 7, 2, 0, 8, 3, 1, 4, 6, 9], [7, 4, 5, 1, 0, 2, 3, 8, 6, 9], [4, 5, 0, 3, 7, 2, 8, 1, 9, 6], [3, 4, 0, 7, 9, 8, 1, 6, 5, 2], [1, 4, 5, 7, 8, 6, 9, 2, 3, 0], [6, 0, 9, 1, 3, 7, 4, 2, 8, 5], [7, 0, 2, 3, 6, 8, 9, 1, 4, 5], [8, 4, 7, 2, 6, 5, 0, 3, 1, 9], [7, 2, 4, 8, 5, 1, 6, 0, 9, 3], [3, 7, 2, 0, 8, 5, 4, 6, 9, 1], [5, 0, 7, 2, 3, 6, 8, 9, 1, 4], [7, 0, 5, 1, 6, 2, 9, 8, 3, 4], [7, 5, 4, 2, 0, 1, 8, 3, 9, 6], [7, 4, 5, 8, 2, 3, 0, 1, 6, 9], [0, 8, 2, 7, 5, 1, 4, 3, 6, 9], [1, 0, 7, 4, 3, 2, 5, 8, 9, 6], [7, 5, 0, 2, 1, 6, 8, 9, 3, 4], [4, 7, 1, 0, 3, 5, 2, 8, 6, 9], [2, 7, 5, 4, 3, 8, 6, 0, 1, 9], [0, 4, 5, 3, 7, 2, 6, 9, 8, 1], [7, 2, 5, 3, 8, 0, 4, 6, 1, 9], [4, 5, 7, 1, 8, 0, 3, 2, 9, 6], [0, 5, 1, 6, 4, 3, 9, 8, 7, 2], [4, 0, 1, 5, 3, 6, 7, 9, 8, 2], [4, 5, 1, 7, 2, 0, 3, 8, 9, 6], [3, 2, 8, 5, 4, 0, 9, 1, 7, 6], [2, 3, 8, 9, 7, 6, 5, 1, 0, 4], [7, 4, 2, 5, 3, 0, 8, 6, 1, 9], [7, 1, 4, 5, 3, 0, 6, 2, 8, 9], [0, 2, 1, 8, 7, 5, 9, 6, 3, 4], [5, 7, 6, 2, 0, 3, 8, 1, 9, 4], [7, 4, 5, 2, 1, 6, 8, 3, 0, 9], [6, 2, 4, 0, 5, 1, 7, 8, 3, 9], [7, 5, 2, 6, 1, 4, 8, 3, 9, 0], [8, 3, 7, 1, 4, 5, 2, 0, 9, 6], [7, 2, 8, 3, 9, 1, 5, 0, 4, 6], [7, 2, 4, 5, 8, 3, 0, 6, 9, 1], [5, 4, 7, 1, 0, 8, 2, 9, 3, 6], [7, 2, 4, 6, 0, 1, 3, 5, 8, 9], [7, 2, 6, 0, 8, 9, 3, 5, 1, 4], [4, 7, 5, 2, 0, 3, 1, 8, 6, 9], [7, 5, 1, 4, 0, 8, 3, 9, 2, 6], [1, 4, 7, 5, 0, 3, 2, 8, 6, 9], [0, 1, 3, 8, 6, 5, 2, 7, 4, 9], [1, 2, 3, 7, 8, 0, 6, 5, 9, 4], [4, 7, 5, 8, 2, 0, 1, 3, 6, 9], [7, 2, 9, 8, 6, 3, 0, 1, 5, 4], [7, 0, 1, 6, 2, 3, 5, 8, 9, 4], [4, 7, 5, 0, 3, 2, 8, 1, 6, 9], [2, 5, 0, 8, 9, 7, 3, 1, 6, 4], [7, 5, 4, 1, 8, 3, 2, 0, 6, 9], [1, 7, 5, 2, 8, 3, 6, 9, 0, 4], [0, 7, 4, 5, 2, 8, 1, 6, 9, 3], [3, 4, 5, 7, 9, 2, 0, 1, 8, 6], [1, 2, 7, 8, 5, 4, 3, 6, 9, 0], [4, 2, 0, 9, 3, 1, 7, 6, 5, 8], [7, 5, 4, 2, 0, 8, 3, 1, 9, 6], [4, 0, 3, 5, 1, 7, 6, 9, 2, 8], [4, 5, 2, 7, 3, 0, 1, 6, 9, 8], [2, 6, 3, 7, 0, 8, 9, 1, 5, 4], [7, 4, 8, 0, 1, 6, 5, 2, 9, 3], [7, 4, 5, 2, 8, 3, 0, 1, 9, 6], [3, 1, 0, 7, 5, 9, 2, 8, 6, 4], [3, 1, 7, 2, 4, 5, 8, 0, 9, 6], [5, 3, 7, 8, 2, 6, 9, 4, 1, 0], [7, 5, 4, 2, 1, 0, 6, 3, 8, 9], [4, 7, 2, 1, 3, 5, 9, 0, 8, 6], [7, 5, 1, 6, 9, 2, 8, 0, 4, 3], [7, 4, 2, 5, 3, 0, 8, 1, 9, 6], [7, 5, 2, 8, 1, 6, 3, 0, 4, 9], [2, 8, 0, 3, 7, 9, 6, 1, 4, 5], [7, 2, 5, 6, 1, 8, 0, 3, 9, 4], [7, 4, 2, 3, 1, 5, 8, 9, 0, 6], [7, 0, 1, 3, 2, 9, 8, 6, 5, 4], [5, 7, 0, 4, 2, 1, 8, 9, 3, 6], [6, 9, 1, 3, 0, 7, 8, 2, 5, 4], [7, 2, 8, 0, 6, 5, 3, 9, 1, 4], [7, 4, 2, 1, 5, 8, 6, 3, 9, 0], [4, 0, 3, 2, 1, 7, 9, 8, 6, 5], [4, 7, 5, 1, 6, 3, 9, 0, 8, 2], [7, 8, 2, 4, 1, 9, 0, 3, 6, 5], [7, 3, 4, 1, 2, 0, 8, 5, 6, 
9], [4, 7, 5, 1, 0, 3, 8, 6, 2, 9], [2, 8, 1, 7, 9, 6, 0, 3, 5, 4], [4, 0, 1, 5, 3, 8, 2, 7, 6, 9], [4, 5, 7, 0, 1, 3, 2, 6, 8, 9], [7, 2, 8, 9, 6, 1, 0, 3, 4, 5], [1, 7, 0, 4, 2, 5, 6, 8, 3, 9], [7, 5, 0, 8, 3, 2, 9, 6, 1, 4], [2, 0, 5, 7, 1, 6, 4, 8, 9, 3], [5, 6, 2, 8, 0, 3, 9, 4, 1, 7], [0, 2, 3, 1, 7, 6, 4, 5, 8, 9], [4, 5, 0, 3, 1, 9, 8, 2, 6, 7], [3, 0, 2, 7, 4, 5, 8, 1, 6, 9], [7, 4, 2, 0, 5, 8, 3, 6, 9, 1], [4, 1, 7, 3, 2, 6, 5, 0, 8, 9], [1, 6, 3, 9, 2, 8, 0, 7, 4, 5], [6, 7, 0, 1, 2, 3, 4, 5, 8, 9], [7, 4, 8, 5, 0, 1, 2, 3, 6, 9], [0, 9, 5, 3, 1, 8, 6, 7, 4, 2], [7, 5, 1, 8, 2, 6, 3, 0, 9, 4], [0, 7, 4, 1, 3, 8, 2, 6, 9, 5], [5, 6, 7, 2, 8, 0, 1, 4, 3, 9], [7, 4, 5, 2, 8, 1, 3, 0, 9, 6], [1, 0, 9, 7, 3, 2, 4, 5, 8, 6], [7, 4, 1, 2, 0, 3, 5, 8, 6, 9], [4, 7, 2, 8, 5, 0, 9, 3, 6, 1], [7, 2, 3, 0, 6, 1, 9, 5, 8, 4], [4, 0, 3, 1, 5, 8, 9, 7, 6, 2], [7, 1, 2, 8, 5, 0, 3, 6, 9, 4], [4, 7, 1, 3, 5, 8, 0, 2, 9, 6], [1, 0, 6, 7, 3, 2, 8, 4, 5, 9], [7, 2, 4, 3, 5, 8, 0, 6, 9, 1], [4, 2, 1, 5, 8, 9, 7, 6, 0, 3], [4, 5, 0, 7, 2, 1, 3, 6, 8, 9], [0, 1, 8, 3, 2, 6, 7, 9, 5, 4], [4, 2, 7, 8, 3, 6, 1, 9, 5, 0], [5, 7, 1, 4, 8, 0, 2, 6, 9, 3], [3, 0, 7, 2, 1, 9, 8, 6, 5, 4], [5, 0, 7, 3, 8, 6, 4, 1, 9, 2], [7, 2, 8, 5, 6, 1, 4, 0, 9, 3], [9, 8, 5, 2, 7, 1, 6, 3, 0, 4], [7, 3, 4, 2, 5, 0, 8, 1, 6, 9], [4, 7, 2, 3, 5, 0, 8, 1, 6, 9], [7, 3, 0, 5, 2, 8, 1, 6, 9, 4], [4, 7, 0, 5, 2, 8, 3, 1, 6, 9], [7, 5, 8, 4, 2, 0, 3, 1, 9, 6], [0, 4, 5, 7, 2, 3, 1, 8, 9, 6], [5, 0, 1, 9, 6, 3, 8, 4, 7, 2], [5, 4, 3, 0, 7, 8, 2, 6, 1, 9], [0, 6, 9, 1, 8, 7, 2, 4, 5, 3], [7, 9, 5, 4, 8, 2, 6, 1, 0, 3], [7, 1, 8, 5, 2, 0, 4, 9, 6, 3], [1, 2, 7, 0, 5, 8, 3, 6, 9, 4], [4, 5, 7, 3, 1, 2, 8, 6, 9, 0], [7, 5, 2, 6, 0, 8, 3, 1, 9, 4], [4, 7, 0, 1, 2, 5, 8, 6, 3, 9], [4, 7, 3, 0, 5, 6, 8, 2, 9, 1], [7, 4, 2, 1, 0, 3, 6, 8, 9, 5], [7, 5, 4, 2, 6, 3, 1, 8, 9, 0], [4, 7, 1, 2, 8, 0, 9, 5, 6, 3], [5, 7, 0, 8, 2, 6, 9, 3, 1, 4], [7, 8, 1, 4, 5, 0, 6, 3, 9, 2], [4, 7, 3, 2, 5, 6, 8, 1, 9, 0], [2, 0, 8, 3, 6, 9, 1, 7, 5, 4], [4, 7, 5, 2, 8, 0, 1, 3, 6, 9], [4, 5, 0, 7, 2, 8, 3, 9, 1, 6], [7, 5, 6, 3, 4, 8, 1, 0, 2, 9], [1, 7, 0, 2, 5, 3, 8, 4, 6, 9], [7, 2, 1, 0, 8, 6, 5, 3, 4, 9], [0, 5, 7, 8, 2, 4, 9, 1, 6, 3], [5, 4, 0, 2, 7, 8, 6, 3, 9, 1], [4, 1, 3, 0, 9, 6, 7, 2, 8, 5], [5, 7, 2, 8, 0, 1, 6, 9, 3, 4], [4, 7, 5, 8, 1, 2, 0, 3, 6, 9], [6, 3, 2, 0, 5, 8, 9, 7, 1, 4], [2, 7, 3, 4, 5, 8, 0, 1, 9, 6], [7, 4, 5, 1, 0, 2, 8, 3, 9, 6], [6, 4, 1, 3, 5, 0, 7, 9, 8, 2], [0, 1, 2, 3, 5, 9, 8, 6, 4, 7], [2, 1, 8, 0, 6, 7, 3, 9, 4, 5], [7, 0, 2, 4, 8, 5, 3, 6, 9, 1], [0, 5, 4, 8, 9, 6, 1, 7, 2, 3], [5, 4, 1, 2, 8, 3, 0, 9, 7, 6], [0, 7, 6, 3, 8, 5, 2, 9, 1, 4], [0, 7, 2, 1, 4, 5, 6, 8, 3, 9], [4, 7, 3, 2, 8, 9, 0, 5, 1, 6], [5, 3, 2, 6, 1, 7, 0, 8, 9, 4], [4, 7, 1, 5, 8, 6, 0, 3, 2, 9], [4, 7, 5, 1, 2, 8, 0, 6, 3, 9], [5, 7, 2, 6, 0, 8, 9, 4, 3, 1], [2, 4, 1, 7, 0, 6, 3, 5, 8, 9], [7, 1, 2, 5, 9, 6, 0, 8, 3, 4], [5, 1, 8, 7, 2, 0, 6, 4, 3, 9], [0, 2, 3, 5, 8, 7, 9, 4, 6, 1], [6, 3, 0, 7, 1, 8, 2, 4, 5, 9], [7, 1, 2, 0, 8, 6, 3, 9, 5, 4], [7, 4, 8, 1, 5, 0, 2, 9, 3, 6], [7, 1, 0, 2, 8, 3, 6, 9, 4, 5], [8, 5, 2, 3, 0, 7, 4, 9, 1, 6], [7, 1, 4, 5, 6, 2, 3, 0, 8, 9], [2, 7, 6, 1, 8, 4, 5, 0, 3, 9], [5, 7, 0, 3, 2, 1, 8, 9, 6, 4], [2, 5, 3, 4, 0, 8, 1, 9, 6, 7], [4, 5, 1, 7, 2, 3, 8, 9, 6, 0], [1, 7, 5, 9, 2, 3, 6, 0, 4, 8], [5, 7, 3, 1, 2, 6, 8, 0, 9, 4], [7, 4, 0, 1, 5, 3, 2, 8, 6, 9], [0, 1, 3, 7, 2, 4, 5, 8, 9, 6], [1, 8, 0, 2, 5, 6, 7, 9, 4, 3], [1, 4, 7, 3, 2, 9, 0, 5, 8, 6], [3, 0, 4, 1, 5, 6, 2, 8, 9, 7], [7, 2, 1, 0, 8, 5, 9, 6, 3, 
4], [0, 2, 1, 6, 5, 7, 8, 4, 3, 9], [7, 4, 5, 0, 3, 9, 2, 8, 6, 1], [7, 4, 0, 2, 5, 3, 1, 8, 9, 6], [7, 2, 5, 3, 0, 4, 6, 9, 1, 8], [3, 0, 1, 8, 5, 2, 7, 9, 4, 6], [4, 5, 0, 1, 6, 9, 3, 7, 2, 8], [5, 0, 7, 2, 3, 1, 8, 4, 6, 9], [7, 2, 4, 3, 1, 0, 6, 5, 9, 8], [0, 7, 2, 8, 1, 5, 3, 4, 6, 9], [7, 2, 8, 0, 3, 9, 6, 5, 4, 1], [5, 7, 2, 1, 8, 3, 6, 4, 9, 0], [7, 4, 2, 0, 1, 5, 8, 3, 6, 9], [4, 3, 7, 0, 8, 2, 1, 9, 5, 6], [9, 2, 3, 0, 6, 1, 8, 7, 5, 4], [7, 2, 0, 6, 3, 8, 9, 4, 1, 5], [7, 2, 0, 3, 8, 6, 9, 1, 4, 5], [7, 8, 1, 6, 5, 0, 2, 3, 4, 9], [6, 0, 5, 1, 7, 4, 9, 3, 2, 8], [7, 5, 4, 3, 2, 1, 6, 0, 8, 9], [3, 0, 1, 6, 9, 8, 2, 7, 5, 4], [4, 5, 7, 8, 2, 6, 3, 9, 1, 0], [1, 6, 9, 4, 5, 7, 8, 2, 0, 3], [7, 4, 2, 0, 5, 3, 8, 6, 1, 9], [7, 0, 3, 5, 1, 2, 8, 4, 9, 6], [5, 0, 9, 6, 7, 2, 1, 3, 8, 4], [0, 7, 8, 2, 3, 5, 1, 6, 9, 4], [5, 1, 6, 0, 2, 8, 9, 7, 4, 3], [7, 1, 2, 4, 0, 8, 6, 3, 9, 5], [7, 4, 5, 0, 3, 8, 6, 2, 9, 1], [8, 7, 5, 0, 3, 9, 6, 4, 2, 1], [7, 4, 5, 3, 0, 1, 8, 2, 6, 9], [1, 5, 3, 7, 2, 8, 6, 0, 4, 9], [5, 7, 2, 8, 0, 9, 6, 3, 4, 1], [5, 2, 3, 1, 8, 9, 6, 7, 0, 4], [4, 3, 0, 1, 7, 6, 2, 8, 5, 9], [4, 5, 7, 6, 1, 8, 2, 0, 3, 9], [5, 7, 3, 1, 0, 4, 6, 2, 8, 9], [2, 7, 3, 4, 0, 1, 6, 8, 9, 5], [2, 7, 8, 0, 9, 1, 6, 5, 3, 4], [5, 7, 1, 0, 4, 2, 8, 3, 6, 9], [4, 7, 8, 0, 2, 3, 5, 9, 6, 1], [7, 2, 8, 6, 0, 9, 3, 4, 5, 1], [7, 8, 2, 4, 0, 3, 1, 9, 6, 5], [0, 3, 2, 5, 7, 1, 4, 6, 9, 8], [4, 5, 7, 0, 2, 1, 8, 6, 3, 9], [4, 7, 0, 2, 8, 5, 1, 3, 9, 6], [8, 1, 0, 3, 7, 2, 5, 6, 4, 9], [4, 1, 7, 8, 6, 0, 2, 3, 9, 5], [4, 7, 1, 5, 0, 3, 2, 6, 9, 8], [0, 1, 3, 2, 4, 7, 8, 9, 6, 5], [7, 5, 2, 0, 4, 3, 1, 6, 8, 9], [7, 5, 0, 1, 3, 2, 8, 6, 9, 4], [0, 7, 5, 1, 2, 4, 8, 3, 9, 6], [5, 7, 3, 2, 8, 0, 6, 9, 1, 4], [2, 7, 5, 6, 8, 9, 3, 1, 0, 4], [7, 4, 0, 2, 5, 1, 8, 3, 6, 9], [8, 9, 7, 6, 0, 3, 2, 1, 4, 5], [1, 6, 7, 0, 2, 9, 8, 3, 4, 5], [7, 4, 5, 1, 0, 2, 8, 3, 9, 6], [7, 3, 4, 1, 8, 6, 2, 9, 5, 0], [1, 3, 4, 7, 6, 5, 2, 8, 0, 9], [6, 9, 0, 8, 2, 5, 7, 3, 1, 4], [1, 0, 2, 7, 3, 8, 4, 5, 6, 9], [7, 3, 2, 0, 4, 1, 6, 5, 8, 9], [8, 7, 2, 1, 3, 0, 5, 6, 9, 4], [3, 6, 0, 8, 5, 2, 9, 4, 1, 7], [1, 4, 7, 0, 3, 2, 5, 6, 8, 9], [1, 7, 5, 0, 3, 2, 8, 6, 9, 4], [2, 7, 5, 4, 8, 9, 6, 1, 0, 3], [5, 4, 1, 7, 2, 6, 8, 3, 9, 0], [4, 1, 7, 0, 5, 8, 2, 3, 9, 6], [7, 4, 2, 1, 8, 3, 5, 0, 6, 9], [5, 6, 9, 7, 2, 0, 3, 1, 8, 4], [7, 0, 3, 2, 1, 5, 4, 6, 8, 9], [0, 3, 1, 4, 7, 2, 6, 5, 8, 9], [7, 8, 6, 2, 5, 9, 1, 3, 0, 4], [1, 7, 2, 0, 5, 8, 6, 4, 9, 3], [1, 2, 0, 8, 3, 5, 6, 4, 9, 7], [0, 7, 1, 5, 4, 6, 3, 2, 8, 9], [4, 7, 5, 2, 3, 0, 1, 8, 9, 6], [2, 0, 1, 8, 4, 5, 7, 3, 6, 9], [7, 2, 5, 0, 3, 6, 4, 8, 9, 1], [2, 7, 5, 8, 3, 4, 6, 9, 0, 1], [4, 5, 7, 2, 0, 3, 1, 8, 6, 9], [7, 2, 1, 4, 3, 0, 5, 8, 6, 9], [7, 0, 6, 1, 8, 2, 3, 9, 4, 5], [6, 9, 0, 1, 5, 3, 2, 7, 8, 4], [7, 4, 5, 2, 8, 3, 6, 0, 1, 9], [4, 1, 5, 7, 2, 0, 3, 8, 9, 6], [7, 4, 5, 0, 2, 1, 3, 6, 8, 9], [8, 7, 2, 3, 9, 0, 4, 1, 5, 6], [7, 5, 0, 1, 2, 8, 6, 4, 3, 9], [4, 3, 2, 8, 1, 5, 7, 6, 0, 9], [5, 4, 7, 0, 1, 2, 8, 3, 9, 6], [7, 2, 6, 5, 8, 9, 4, 1, 3, 0], [2, 7, 1, 4, 8, 5, 6, 9, 0, 3], [7, 2, 1, 8, 3, 4, 9, 5, 6, 0], [1, 4, 2, 5, 3, 7, 8, 6, 0, 9], [7, 8, 2, 6, 9, 1, 5, 3, 0, 4], [3, 1, 8, 7, 2, 0, 6, 9, 5, 4], [4, 1, 0, 7, 8, 5, 2, 6, 9, 3], [7, 0, 8, 3, 2, 5, 1, 9, 6, 4], [7, 0, 3, 8, 2, 6, 9, 5, 4, 1], [0, 3, 5, 7, 6, 9, 2, 1, 4, 8], [7, 5, 8, 2, 6, 4, 3, 0, 9, 1], [2, 8, 7, 1, 6, 3, 5, 9, 4, 0], [0, 9, 3, 1, 8, 7, 5, 4, 6, 2], [6, 9, 7, 2, 5, 8, 3, 1, 0, 4], [2, 1, 0, 8, 3, 5, 6, 7, 9, 4], [2, 0, 7, 6, 8, 9, 3, 5, 1, 4], [4, 1, 3, 7, 2, 5, 9, 6, 8, 
0], [5, 7, 1, 2, 0, 3, 8, 6, 9, 4], [7, 4, 5, 2, 1, 6, 0, 8, 3, 9], [0, 2, 8, 3, 1, 9, 7, 6, 4, 5], [7, 8, 0, 5, 6, 2, 3, 9, 1, 4], [5, 4, 2, 0, 7, 8, 1, 9, 3, 6], [7, 4, 0, 1, 2, 3, 5, 8, 9, 6], [8, 7, 5, 6, 3, 1, 4, 0, 2, 9], [8, 1, 2, 5, 7, 0, 3, 4, 9, 6], [7, 4, 1, 5, 0, 2, 3, 8, 6, 9], [1, 2, 8, 7, 3, 9, 6, 0, 5, 4], [7, 3, 0, 2, 4, 8, 1, 6, 5, 9], [4, 7, 6, 5, 2, 3, 8, 0, 9, 1], [7, 1, 4, 2, 5, 8, 6, 0, 3, 9], [1, 4, 5, 8, 6, 0, 3, 7, 2, 9], [7, 2, 0, 6, 8, 1, 9, 5, 4, 3], [5, 0, 4, 2, 1, 3, 6, 7, 8, 9], [2, 1, 3, 0, 6, 5, 8, 7, 9, 4], [7, 4, 2, 1, 0, 8, 3, 6, 5, 9], [7, 0, 6, 3, 1, 5, 9, 2, 8, 4], [8, 2, 3, 1, 7, 9, 0, 6, 5, 4], [7, 0, 2, 5, 8, 4, 3, 1, 6, 9], [6, 7, 1, 2, 3, 0, 5, 8, 9, 4], [1, 8, 7, 2, 3, 0, 5, 6, 9, 4], [1, 0, 7, 2, 6, 5, 8, 3, 9, 4], [2, 3, 7, 8, 6, 0, 1, 9, 5, 4], [4, 3, 2, 5, 7, 0, 8, 6, 1, 9], [6, 2, 0, 7, 9, 8, 3, 1, 5, 4], [0, 3, 6, 8, 9, 1, 5, 7, 2, 4], [3, 4, 7, 0, 5, 1, 2, 6, 8, 9], [7, 5, 8, 1, 2, 0, 3, 6, 9, 4], [5, 0, 3, 1, 4, 7, 8, 6, 2, 9], [4, 7, 5, 8, 0, 2, 1, 6, 3, 9], [7, 1, 5, 2, 6, 4, 8, 0, 3, 9], [7, 2, 1, 5, 3, 6, 9, 8, 0, 4], [7, 1, 2, 8, 3, 0, 6, 9, 4, 5], [3, 0, 4, 2, 7, 5, 8, 9, 1, 6], [2, 7, 8, 3, 0, 5, 4, 6, 1, 9], [4, 7, 5, 1, 8, 2, 0, 3, 6, 9], [7, 2, 4, 5, 1, 8, 0, 3, 9, 6], [3, 0, 2, 7, 8, 5, 4, 6, 9, 1], [4, 7, 1, 2, 8, 5, 3, 0, 6, 9], [4, 7, 0, 5, 2, 8, 6, 9, 1, 3], [1, 0, 3, 2, 8, 6, 9, 7, 4, 5], [7, 1, 4, 6, 0, 8, 3, 9, 2, 5], [5, 7, 0, 4, 2, 8, 1, 3, 6, 9], [0, 7, 2, 8, 9, 5, 6, 4, 3, 1], [0, 3, 6, 9, 5, 1, 2, 7, 4, 8], [0, 5, 1, 3, 6, 4, 7, 2, 8, 9], [8, 2, 7, 3, 0, 6, 1, 4, 9, 5], [7, 4, 1, 5, 0, 8, 2, 6, 3, 9], [1, 7, 5, 2, 8, 0, 6, 3, 9, 4], [4, 1, 5, 3, 0, 7, 2, 8, 6, 9], [5, 3, 4, 2, 7, 0, 8, 9, 6, 1], [0, 3, 7, 9, 2, 6, 8, 5, 4, 1], [4, 7, 1, 0, 5, 2, 8, 3, 9, 6], [4, 7, 0, 2, 5, 3, 1, 6, 8, 9], [7, 6, 5, 0, 4, 2, 8, 1, 3, 9], [7, 4, 5, 1, 3, 2, 8, 9, 0, 6], [1, 7, 2, 0, 8, 6, 5, 3, 9, 4], [5, 4, 3, 2, 7, 6, 0, 9, 8, 1], [4, 7, 8, 5, 2, 1, 6, 0, 3, 9], [0, 7, 2, 8, 3, 9, 1, 4, 6, 5], [7, 5, 0, 3, 6, 4, 2, 8, 1, 9], [7, 3, 4, 0, 5, 2, 6, 1, 8, 9], [7, 4, 5, 1, 0, 2, 8, 6, 3, 9], [7, 2, 8, 5, 6, 9, 0, 3, 4, 1], [4, 5, 7, 0, 3, 1, 2, 6, 8, 9], [7, 1, 2, 6, 8, 9, 0, 3, 5, 4], [1, 4, 5, 9, 6, 3, 2, 0, 7, 8], [1, 7, 2, 0, 8, 6, 9, 3, 5, 4], [3, 1, 5, 7, 4, 0, 6, 2, 8, 9], [4, 1, 7, 0, 2, 8, 5, 3, 6, 9], [1, 7, 6, 2, 0, 9, 8, 3, 4, 5], [7, 4, 8, 2, 0, 3, 1, 6, 9, 5], [8, 7, 2, 4, 0, 1, 3, 9, 5, 6], [7, 4, 5, 0, 3, 2, 1, 9, 8, 6], [5, 7, 2, 8, 6, 3, 0, 9, 1, 4], [1, 0, 8, 3, 6, 9, 2, 5, 7, 4], [7, 2, 4, 1, 0, 6, 8, 3, 9, 5], [4, 7, 2, 1, 6, 8, 5, 9, 3, 0], [0, 4, 5, 3, 6, 1, 9, 7, 2, 8], [4, 3, 0, 5, 1, 7, 8, 2, 9, 6], [4, 7, 2, 0, 9, 5, 1, 8, 3, 6], [5, 4, 3, 1, 2, 8, 7, 9, 6, 0], [7, 1, 2, 6, 8, 3, 0, 5, 4, 9], [1, 7, 3, 2, 8, 6, 5, 4, 0, 9], [7, 2, 3, 5, 8, 0, 6, 9, 1, 4], [1, 5, 0, 6, 4, 3, 7, 2, 8, 9], [7, 3, 2, 6, 1, 0, 5, 8, 9, 4], [0, 3, 9, 1, 5, 6, 8, 7, 2, 4], [0, 1, 8, 6, 9, 2, 3, 7, 4, 5], [7, 4, 5, 2, 1, 8, 0, 6, 9, 3], [7, 2, 8, 5, 9, 6, 1, 0, 4, 3], [5, 7, 0, 6, 8, 4, 2, 3, 9, 1], [1, 3, 6, 9, 5, 0, 7, 8, 2, 4], [1, 7, 4, 2, 8, 6, 5, 0, 9, 3], [4, 5, 0, 1, 3, 7, 2, 6, 8, 9], [5, 7, 4, 3, 2, 8, 0, 6, 9, 1], [0, 9, 6, 8, 1, 3, 4, 5, 7, 2], [4, 7, 2, 1, 0, 3, 6, 8, 9, 5], [7, 6, 8, 4, 5, 9, 2, 3, 0, 1], [7, 3, 8, 1, 0, 2, 9, 6, 5, 4], [4, 0, 7, 2, 5, 8, 6, 9, 1, 3], [1, 7, 2, 5, 8, 9, 6, 4, 0, 3], [1, 6, 8, 2, 0, 3, 7, 9, 4, 5], [7, 1, 2, 8, 0, 4, 5, 3, 6, 9], [7, 5, 0, 2, 1, 6, 8, 9, 3, 4], [7, 1, 0, 2, 4, 5, 8, 3, 6, 9], [5, 2, 6, 9, 7, 8, 1, 4, 3, 0], [7, 2, 4, 5, 8, 3, 0, 1, 6, 9], [6, 7, 2, 9, 8, 0, 3, 1, 5, 
4], [7, 2, 0, 1, 6, 8, 9, 3, 4, 5], [0, 4, 7, 2, 5, 8, 1, 9, 3, 6], [7, 0, 4, 2, 1, 8, 5, 3, 6, 9], [2, 7, 4, 0, 5, 9, 1, 3, 8, 6], [3, 7, 0, 1, 2, 8, 6, 9, 4, 5], [1, 7, 2, 5, 6, 8, 9, 0, 3, 4], [7, 2, 1, 5, 4, 3, 0, 8, 9, 6], [8, 2, 3, 0, 7, 9, 6, 5, 4, 1], [0, 8, 3, 2, 7, 6, 1, 9, 5, 4], [7, 2, 8, 1, 5, 0, 6, 4, 3, 9], [2, 1, 7, 8, 9, 0, 5, 4, 6, 3], [2, 4, 7, 5, 0, 3, 8, 1, 6, 9], [5, 4, 0, 1, 2, 7, 6, 9, 3, 8], [8, 6, 7, 5, 1, 2, 3, 0, 4, 9], [5, 1, 7, 2, 0, 6, 8, 9, 4, 3], [1, 5, 8, 0, 7, 2, 3, 4, 9, 6], [1, 4, 5, 6, 3, 0, 9, 7, 2, 8], [5, 2, 7, 3, 8, 6, 0, 9, 4, 1], [4, 7, 2, 5, 0, 1, 3, 8, 6, 9], [4, 7, 2, 5, 0, 3, 8, 1, 6, 9], [5, 0, 3, 7, 2, 1, 8, 6, 9, 4], [7, 5, 2, 4, 0, 3, 9, 8, 1, 6], [4, 1, 5, 3, 7, 0, 2, 6, 8, 9], [0, 4, 1, 6, 3, 5, 7, 8, 9, 2], [7, 4, 5, 3, 1, 6, 2, 0, 9, 8], [7, 5, 1, 6, 0, 3, 2, 8, 9, 4], [2, 7, 8, 3, 9, 0, 1, 6, 5, 4], [5, 0, 3, 7, 4, 1, 6, 8, 9, 2], [4, 7, 6, 2, 8, 1, 5, 3, 0, 9], [7, 2, 8, 5, 4, 6, 1, 3, 0, 9], [5, 7, 2, 3, 6, 8, 0, 9, 1, 4], [6, 1, 5, 2, 8, 0, 7, 9, 3, 4], [7, 5, 0, 2, 8, 3, 6, 9, 1, 4], [7, 2, 5, 4, 8, 1, 0, 3, 9, 6], [8, 7, 0, 5, 3, 2, 6, 9, 1, 4], [4, 0, 5, 1, 2, 7, 3, 9, 6, 8], [2, 4, 5, 7, 9, 8, 0, 3, 6, 1], [5, 7, 2, 1, 9, 8, 6, 3, 0, 4], [4, 1, 0, 2, 7, 9, 5, 6, 8, 3], [5, 8, 3, 7, 1, 0, 4, 2, 9, 6], [4, 1, 7, 0, 8, 5, 2, 6, 3, 9], [7, 2, 6, 8, 4, 3, 0, 5, 1, 9], [2, 1, 3, 0, 8, 9, 6, 7, 5, 4], [0, 2, 7, 1, 8, 3, 9, 6, 5, 4], [0, 7, 3, 6, 4, 5, 2, 9, 1, 8], [7, 5, 1, 2, 0, 4, 8, 6, 9, 3], [7, 2, 5, 0, 4, 6, 1, 3, 8, 9], [7, 1, 4, 2, 6, 8, 0, 3, 5, 9], [1, 7, 4, 2, 0, 8, 5, 6, 3, 9], [4, 5, 7, 0, 8, 3, 6, 2, 1, 9], [7, 2, 5, 0, 1, 3, 4, 9, 8, 6], [7, 2, 0, 5, 1, 8, 6, 3, 9, 4], [6, 0, 8, 9, 2, 3, 5, 7, 1, 4], [2, 7, 0, 1, 8, 6, 3, 9, 5, 4], [4, 5, 7, 8, 0, 1, 9, 3, 2, 6], [6, 0, 1, 9, 4, 3, 7, 5, 8, 2], [7, 0, 5, 4, 9, 2, 3, 6, 8, 1], [7, 2, 5, 1, 4, 8, 3, 0, 6, 9], [1, 5, 7, 8, 4, 2, 6, 0, 9, 3], [7, 5, 1, 4, 2, 3, 8, 0, 6, 9], [0, 3, 1, 8, 2, 7, 9, 4, 5, 6], [7, 2, 8, 0, 5, 4, 9, 3, 6, 1], [9, 8, 3, 5, 2, 6, 4, 0, 1, 7], [7, 4, 1, 6, 2, 8, 0, 5, 3, 9], [5, 7, 2, 0, 8, 9, 6, 3, 4, 1], [4, 7, 6, 2, 1, 5, 3, 9, 0, 8], [2, 8, 3, 5, 0, 1, 4, 6, 9, 7], [1, 7, 4, 8, 5, 2, 3, 9, 0, 6], [0, 7, 9, 1, 4, 2, 5, 8, 3, 6], [3, 7, 2, 5, 0, 4, 8, 6, 1, 9], [1, 5, 0, 7, 2, 3, 6, 8, 9, 4], [1, 0, 3, 8, 7, 5, 4, 9, 2, 6], [6, 8, 2, 0, 3, 9, 4, 5, 7, 1], [7, 2, 8, 5, 6, 9, 3, 0, 4, 1], [7, 2, 5, 8, 6, 1, 9, 0, 3, 4], [7, 5, 4, 3, 8, 2, 9, 0, 1, 6], [4, 5, 7, 1, 0, 2, 8, 3, 6, 9], [7, 4, 5, 3, 0, 1, 8, 2, 9, 6], [7, 2, 1, 5, 4, 6, 3, 0, 8, 9], [7, 4, 0, 3, 5, 1, 8, 2, 6, 9], [1, 4, 0, 3, 6, 9, 7, 2, 8, 5], [3, 0, 8, 9, 6, 7, 2, 4, 1, 5], [4, 5, 3, 7, 2, 0, 9, 8, 6, 1], [3, 0, 7, 2, 5, 4, 6, 1, 8, 9], [7, 4, 5, 2, 8, 1, 3, 0, 9, 6], [5, 6, 3, 7, 2, 1, 0, 9, 8, 4], [7, 1, 0, 2, 6, 8, 9, 4, 5, 3], [5, 0, 2, 6, 9, 3, 8, 7, 4, 1], [4, 0, 5, 2, 7, 8, 3, 1, 6, 9], [2, 0, 7, 3, 6, 8, 5, 1, 9, 4], [9, 5, 0, 8, 6, 2, 7, 3, 1, 4], [2, 6, 7, 0, 8, 3, 9, 1, 4, 5], [5, 7, 9, 8, 3, 2, 6, 0, 4, 1], [0, 5, 7, 2, 8, 3, 1, 6, 9, 4], [7, 1, 2, 4, 6, 0, 9, 8, 3, 5], [3, 0, 7, 5, 6, 9, 1, 8, 2, 4], [4, 7, 0, 3, 2, 1, 6, 9, 5, 8], [5, 4, 6, 7, 3, 8, 1, 2, 0, 9], [7, 2, 6, 0, 3, 1, 8, 9, 4, 5], [2, 1, 3, 5, 4, 8, 7, 0, 9, 6], [2, 9, 0, 5, 3, 8, 1, 6, 7, 4], [4, 7, 8, 5, 1, 9, 2, 3, 0, 6], [5, 7, 1, 2, 3, 8, 4, 0, 9, 6], [6, 1, 4, 0, 7, 2, 5, 8, 9, 3], [7, 4, 2, 5, 1, 8, 3, 6, 9, 0], [7, 2, 0, 5, 4, 6, 8, 3, 1, 9], [1, 7, 0, 2, 3, 6, 4, 8, 9, 5], [4, 7, 2, 1, 5, 8, 0, 6, 9, 3], [5, 7, 2, 8, 1, 6, 3, 0, 9, 4], [3, 7, 8, 2, 1, 5, 0, 9, 6, 4], [7, 4, 8, 1, 2, 0, 5, 9, 6, 
3], [5, 0, 7, 6, 8, 2, 1, 9, 3, 4], [4, 7, 1, 0, 2, 5, 8, 9, 3, 6], [4, 7, 1, 2, 8, 5, 0, 3, 6, 9], [7, 4, 5, 2, 8, 6, 0, 3, 1, 9], [7, 5, 1, 4, 2, 3, 0, 8, 9, 6], [5, 7, 2, 1, 6, 4, 3, 8, 0, 9], [0, 3, 2, 7, 6, 8, 1, 9, 5, 4], [7, 1, 0, 2, 4, 3, 8, 6, 5, 9], [0, 7, 2, 5, 1, 4, 3, 8, 6, 9], [4, 1, 7, 3, 0, 6, 2, 5, 8, 9], [5, 4, 7, 1, 6, 9, 8, 0, 2, 3], [1, 7, 2, 4, 0, 3, 8, 9, 6, 5], [4, 0, 5, 1, 9, 3, 7, 8, 2, 6], [2, 3, 7, 6, 8, 5, 4, 0, 1, 9], [4, 6, 0, 3, 7, 1, 2, 5, 8, 9], [7, 5, 0, 4, 1, 8, 2, 3, 6, 9], [7, 3, 1, 0, 2, 5, 4, 8, 9, 6], [2, 7, 4, 5, 3, 9, 0, 1, 6, 8], [6, 7, 5, 8, 2, 0, 4, 1, 3, 9], [7, 2, 8, 5, 4, 1, 3, 0, 6, 9], [1, 5, 7, 2, 8, 6, 3, 9, 0, 4], [7, 8, 2, 6, 0, 1, 5, 3, 4, 9], [7, 5, 3, 2, 6, 0, 8, 4, 1, 9], [5, 2, 7, 8, 0, 3, 1, 6, 9, 4], [1, 4, 5, 6, 2, 7, 3, 0, 8, 9], [5, 7, 4, 6, 2, 8, 1, 3, 0, 9], [1, 0, 3, 2, 7, 4, 8, 5, 9, 6], [7, 4, 2, 5, 0, 3, 1, 8, 6, 9], [1, 2, 8, 0, 7, 5, 4, 6, 3, 9], [1, 7, 3, 2, 0, 8, 6, 9, 4, 5], [4, 7, 5, 2, 8, 1, 0, 3, 9, 6], [7, 3, 4, 5, 2, 1, 0, 9, 8, 6], [7, 4, 1, 3, 2, 0, 5, 8, 9, 6], [7, 2, 0, 8, 5, 3, 9, 4, 1, 6], [5, 6, 0, 2, 7, 9, 8, 1, 4, 3], [1, 7, 4, 8, 6, 9, 3, 2, 0, 5], [0, 1, 3, 6, 9, 2, 7, 8, 5, 4], [7, 2, 5, 1, 3, 0, 8, 6, 9, 4], [1, 7, 2, 6, 4, 5, 0, 3, 8, 9], [3, 8, 2, 9, 0, 1, 7, 6, 5, 4], [7, 0, 5, 3, 2, 9, 4, 8, 1, 6], [7, 4, 2, 0, 3, 8, 9, 6, 1, 5], [7, 5, 9, 6, 2, 1, 4, 3, 8, 0], [0, 7, 2, 1, 3, 5, 6, 8, 9, 4], [7, 2, 8, 5, 6, 3, 0, 1, 9, 4], [1, 7, 2, 3, 8, 0, 6, 4, 9, 5], [7, 2, 8, 5, 6, 0, 9, 4, 1, 3], [3, 9, 0, 8, 6, 2, 7, 1, 5, 4], [7, 8, 0, 2, 9, 3, 6, 1, 5, 4], [2, 8, 7, 3, 0, 5, 1, 9, 4, 6], [9, 6, 0, 2, 7, 8, 1, 3, 5, 4], [5, 7, 4, 2, 3, 0, 8, 6, 9, 1], [0, 7, 8, 2, 4, 5, 3, 1, 6, 9], [7, 8, 3, 1, 5, 2, 0, 9, 6, 4], [0, 3, 6, 1, 2, 5, 8, 4, 7, 9], [4, 5, 7, 2, 0, 3, 8, 1, 6, 9], [1, 4, 5, 7, 0, 2, 3, 8, 6, 9], [7, 1, 8, 0, 4, 5, 9, 2, 6, 3], [7, 2, 8, 6, 0, 3, 4, 1, 5, 9], [4, 1, 3, 8, 5, 0, 2, 9, 6, 7], [7, 5, 4, 2, 0, 1, 3, 6, 8, 9], [5, 7, 8, 3, 2, 1, 9, 0, 6, 4], [7, 5, 4, 1, 2, 0, 3, 6, 8, 9], [2, 1, 3, 0, 7, 4, 8, 5, 6, 9], [3, 7, 2, 5, 8, 1, 0, 6, 9, 4], [7, 2, 8, 0, 5, 3, 1, 9, 6, 4], [4, 7, 5, 2, 8, 1, 0, 3, 6, 9], [1, 7, 2, 0, 4, 5, 3, 6, 8, 9], [4, 5, 0, 3, 7, 6, 9, 2, 1, 8], [1, 7, 8, 3, 6, 5, 4, 9, 2, 0], [1, 7, 0, 3, 8, 2, 9, 5, 6, 4], [1, 0, 7, 3, 5, 6, 4, 8, 2, 9], [1, 0, 2, 7, 8, 4, 5, 6, 9, 3], [7, 5, 0, 2, 8, 9, 6, 1, 3, 4], [1, 0, 5, 6, 9, 3, 8, 2, 4, 7], [7, 2, 0, 3, 4, 5, 1, 8, 6, 9], [0, 3, 9, 2, 6, 7, 8, 5, 4, 1], [7, 5, 4, 1, 2, 3, 9, 0, 8, 6], [6, 1, 7, 2, 3, 8, 5, 0, 4, 9], [3, 1, 8, 0, 2, 6, 9, 7, 4, 5], [5, 2, 7, 6, 1, 3, 0, 8, 9, 4], [0, 7, 2, 6, 1, 8, 9, 3, 5, 4], [7, 0, 4, 5, 3, 2, 1, 9, 6, 8], [1, 7, 4, 3, 0, 2, 8, 9, 6, 5], [0, 2, 1, 3, 8, 5, 7, 6, 9, 4], [7, 4, 5, 0, 2, 1, 8, 9, 6, 3], [7, 6, 8, 5, 2, 9, 0, 1, 3, 4], [7, 4, 5, 1, 3, 0, 8, 2, 9, 6], [7, 8, 2, 0, 1, 3, 6, 9, 5, 4], [7, 4, 2, 8, 1, 6, 5, 0, 9, 3], [5, 4, 1, 7, 2, 8, 3, 0, 6, 9], [1, 7, 8, 6, 2, 9, 3, 5, 0, 4], [5, 0, 2, 3, 9, 8, 6, 7, 1, 4], [1, 5, 3, 8, 2, 7, 0, 6, 9, 4], [7, 4, 3, 2, 0, 8, 1, 6, 9, 5], [1, 5, 4, 2, 7, 8, 3, 6, 0, 9], [1, 0, 3, 7, 2, 8, 5, 4, 6, 9], [3, 0, 1, 6, 9, 8, 7, 2, 5, 4], [2, 7, 8, 0, 3, 1, 5, 9, 6, 4], [1, 2, 8, 3, 9, 4, 5, 7, 0, 6], [4, 3, 7, 2, 1, 8, 0, 5, 9, 6], [5, 4, 7, 6, 3, 1, 8, 2, 0, 9], [4, 7, 6, 5, 2, 1, 8, 9, 3, 0], [9, 0, 6, 2, 3, 1, 7, 8, 4, 5], [0, 5, 7, 4, 3, 2, 9, 6, 8, 1], [0, 4, 5, 7, 2, 6, 9, 8, 3, 1], [6, 7, 0, 2, 8, 5, 1, 9, 3, 4], [6, 8, 2, 0, 7, 5, 3, 4, 9, 1], [3, 0, 1, 7, 2, 5, 9, 8, 4, 6], [7, 5, 4, 1, 3, 2, 8, 0, 9, 6], [2, 3, 8, 0, 1, 6, 7, 5, 9, 
4], [0, 7, 2, 4, 8, 5, 6, 3, 1, 9], [6, 0, 9, 3, 1, 8, 7, 5, 2, 4], [3, 7, 8, 2, 0, 1, 5, 9, 4, 6], [7, 4, 5, 2, 1, 3, 0, 8, 9, 6], [3, 7, 5, 4, 0, 2, 9, 8, 1, 6], [7, 2, 5, 4, 8, 1, 3, 6, 0, 9], [1, 7, 2, 3, 6, 0, 9, 8, 5, 4], [4, 7, 5, 1, 2, 0, 6, 8, 3, 9], [4, 3, 8, 6, 1, 9, 2, 7, 0, 5], [2, 5, 3, 7, 1, 8, 0, 6, 9, 4], [1, 8, 7, 2, 0, 4, 5, 3, 6, 9], [7, 2, 8, 4, 5, 3, 9, 0, 1, 6], [2, 0, 7, 3, 5, 4, 1, 6, 8, 9], [9, 4, 5, 3, 1, 0, 2, 8, 7, 6], [5, 2, 0, 7, 1, 8, 6, 3, 4, 9], [6, 9, 3, 8, 0, 1, 5, 7, 2, 4], [4, 3, 0, 7, 2, 8, 9, 1, 5, 6], [7, 3, 5, 0, 2, 8, 9, 6, 4, 1], [1, 5, 4, 3, 0, 7, 2, 6, 8, 9], [7, 3, 1, 2, 0, 5, 4, 6, 8, 9], [2, 7, 6, 1, 5, 8, 0, 9, 3, 4], [7, 2, 5, 6, 0, 3, 8, 9, 1, 4], [6, 9, 1, 5, 2, 7, 3, 8, 4, 0], [1, 0, 2, 7, 8, 6, 3, 9, 5, 4], [3, 5, 8, 2, 4, 7, 0, 1, 9, 6], [2, 0, 3, 9, 8, 1, 5, 6, 7, 4], [7, 1, 4, 3, 6, 2, 5, 9, 0, 8], [1, 5, 3, 4, 0, 7, 2, 6, 8, 9], [2, 7, 0, 1, 3, 8, 4, 5, 6, 9], [5, 3, 4, 7, 8, 6, 1, 0, 2, 9], [7, 2, 8, 0, 3, 9, 6, 5, 1, 4], [4, 5, 2, 8, 7, 3, 1, 6, 0, 9], [7, 8, 2, 1, 6, 0, 9, 3, 5, 4], [2, 4, 1, 3, 6, 7, 8, 5, 0, 9], [5, 8, 1, 0, 2, 6, 7, 9, 3, 4], [7, 5, 4, 8, 3, 0, 1, 2, 9, 6], [1, 7, 2, 5, 4, 6, 8, 0, 3, 9], [6, 0, 3, 1, 9, 2, 5, 4, 8, 7], [4, 5, 7, 2, 0, 9, 6, 8, 3, 1], [5, 6, 0, 2, 7, 3, 4, 8, 1, 9], [4, 7, 5, 1, 8, 2, 3, 9, 6, 0], [7, 1, 4, 5, 2, 0, 3, 8, 9, 6], [7, 4, 2, 8, 5, 1, 6, 0, 3, 9], [7, 2, 8, 0, 3, 9, 1, 6, 4, 5], [0, 7, 5, 4, 3, 2, 1, 6, 8, 9], [8, 0, 6, 2, 7, 1, 9, 5, 3, 4], [7, 2, 1, 4, 8, 3, 6, 9, 5, 0], [1, 7, 6, 5, 8, 4, 3, 0, 2, 9], [5, 4, 7, 1, 0, 6, 2, 8, 3, 9], [3, 7, 2, 6, 8, 9, 0, 4, 5, 1], [7, 4, 1, 5, 2, 3, 0, 8, 6, 9], [1, 7, 4, 6, 3, 5, 9, 2, 8, 0], [2, 0, 3, 7, 9, 4, 8, 1, 6, 5], [7, 5, 2, 8, 1, 6, 0, 9, 4, 3], [5, 2, 4, 0, 7, 8, 1, 3, 6, 9], [7, 2, 8, 1, 0, 4, 3, 5, 6, 9], [7, 5, 2, 1, 0, 3, 6, 8, 9, 4], [5, 4, 7, 1, 2, 3, 0, 6, 8, 9], [7, 0, 9, 2, 4, 3, 5, 1, 8, 6], [7, 2, 4, 5, 3, 0, 8, 1, 9, 6], [7, 5, 1, 2, 4, 6, 8, 3, 0, 9], [7, 5, 4, 8, 2, 0, 1, 6, 9, 3], [0, 8, 2, 7, 1, 3, 4, 5, 6, 9], [5, 2, 7, 4, 1, 0, 8, 6, 3, 9], [3, 8, 9, 0, 2, 7, 5, 1, 4, 6], [0, 1, 5, 8, 4, 7, 9, 2, 3, 6], [7, 0, 2, 8, 4, 1, 5, 9, 3, 6], [7, 2, 5, 6, 1, 8, 9, 0, 3, 4], [2, 8, 9, 6, 0, 7, 3, 1, 5, 4], [4, 8, 0, 5, 9, 2, 7, 3, 1, 6], [1, 7, 4, 5, 2, 3, 8, 6, 9, 0], [6, 2, 7, 3, 0, 9, 8, 5, 4, 1], [0, 3, 1, 6, 8, 5, 2, 9, 7, 4], [7, 8, 9, 2, 4, 3, 1, 0, 5, 6], [5, 7, 0, 1, 8, 2, 3, 6, 9, 4], [7, 3, 4, 2, 0, 5, 8, 6, 9, 1], [6, 5, 7, 8, 2, 0, 3, 9, 1, 4], [6, 7, 2, 0, 3, 9, 8, 5, 1, 4], [3, 1, 5, 7, 9, 2, 8, 6, 4, 0], [0, 7, 5, 8, 2, 1, 9, 3, 6, 4], [0, 3, 2, 7, 8, 5, 1, 9, 6, 4], [7, 2, 8, 0, 1, 3, 6, 9, 4, 5], [6, 5, 7, 1, 9, 8, 2, 4, 0, 3], [7, 4, 5, 1, 2, 0, 8, 3, 9, 6], [7, 4, 5, 1, 0, 3, 2, 6, 8, 9], [0, 1, 6, 4, 3, 7, 5, 2, 9, 8], [7, 4, 0, 3, 5, 2, 6, 8, 1, 9], [1, 4, 7, 6, 2, 9, 5, 8, 0, 3], [6, 1, 7, 8, 9, 2, 0, 5, 4, 3], [4, 0, 3, 5, 7, 2, 8, 1, 9, 6], [1, 7, 3, 4, 5, 6, 0, 2, 8, 9], [5, 6, 0, 4, 3, 1, 7, 9, 8, 2], [7, 1, 0, 3, 2, 8, 9, 6, 4, 5], [5, 2, 3, 0, 7, 1, 8, 4, 9, 6], [4, 1, 7, 3, 0, 2, 8, 6, 9, 5], [7, 3, 5, 4, 0, 8, 2, 1, 9, 6], [0, 7, 2, 5, 8, 6, 1, 3, 9, 4], [2, 4, 1, 5, 0, 8, 6, 7, 3, 9], [3, 0, 2, 1, 7, 9, 6, 8, 5, 4], [4, 2, 0, 3, 5, 1, 7, 8, 6, 9], [0, 3, 4, 6, 9, 5, 7, 8, 2, 1], [7, 0, 3, 2, 8, 9, 1, 6, 5, 4], [4, 7, 0, 1, 3, 2, 8, 6, 9, 5], [1, 0, 7, 6, 2, 5, 4, 3, 8, 9], [5, 0, 3, 6, 9, 4, 8, 2, 7, 1], [7, 5, 4, 0, 2, 3, 8, 1, 6, 9], [4, 7, 1, 8, 2, 5, 6, 3, 0, 9], [1, 7, 2, 3, 9, 8, 0, 6, 4, 5], [7, 2, 0, 5, 8, 9, 6, 1, 3, 4], [3, 4, 2, 7, 8, 5, 1, 6, 0, 9], [5, 4, 0, 7, 2, 8, 1, 3, 6, 
9], [4, 1, 0, 6, 5, 8, 3, 9, 2, 7], [4, 7, 5, 6, 8, 0, 2, 3, 1, 9], [0, 5, 3, 6, 7, 9, 4, 2, 8, 1], [7, 8, 5, 2, 1, 4, 6, 3, 0, 9], [3, 1, 4, 7, 2, 6, 0, 5, 8, 9], [4, 5, 2, 7, 8, 0, 3, 9, 1, 6], [7, 1, 2, 8, 3, 6, 0, 9, 5, 4], [4, 1, 3, 7, 8, 5, 0, 2, 6, 9], [7, 2, 8, 4, 0, 5, 9, 3, 1, 6], [5, 0, 2, 1, 8, 9, 7, 4, 3, 6], [1, 0, 5, 3, 6, 9, 4, 8, 7, 2], [7, 2, 4, 0, 1, 8, 3, 6, 9, 5], [3, 0, 6, 9, 2, 8, 7, 4, 1, 5], [2, 8, 7, 1, 0, 6, 3, 9, 5, 4], [0, 2, 8, 5, 3, 7, 9, 1, 4, 6], [5, 4, 0, 7, 3, 8, 2, 1, 6, 9], [1, 6, 7, 2, 5, 8, 9, 0, 3, 4], [1, 0, 6, 2, 7, 3, 8, 9, 4, 5], [7, 5, 1, 6, 2, 3, 8, 0, 9, 4], [2, 0, 3, 8, 5, 6, 4, 9, 1, 7], [1, 2, 7, 9, 6, 0, 3, 4, 8, 5], [5, 7, 4, 8, 0, 2, 3, 1, 9, 6], [0, 7, 2, 1, 5, 8, 6, 3, 4, 9], [2, 7, 8, 1, 0, 3, 5, 6, 9, 4], [7, 2, 9, 0, 3, 8, 1, 4, 6, 5], [1, 7, 2, 8, 5, 0, 3, 9, 6, 4], [5, 7, 4, 2, 1, 3, 0, 8, 6, 9], [0, 7, 2, 1, 5, 4, 3, 6, 8, 9], [7, 4, 5, 6, 1, 0, 2, 3, 8, 9], [2, 1, 3, 8, 0, 5, 7, 4, 6, 9], [7, 0, 4, 1, 5, 2, 3, 6, 8, 9], [0, 1, 4, 7, 3, 5, 2, 8, 6, 9], [0, 2, 9, 8, 7, 3, 1, 6, 5, 4], [7, 0, 2, 5, 3, 8, 1, 9, 4, 6], [7, 5, 4, 8, 1, 2, 6, 3, 9, 0], [2, 8, 3, 9, 6, 4, 1, 0, 5, 7], [7, 2, 1, 8, 0, 5, 6, 4, 3, 9], [1, 7, 4, 2, 3, 6, 8, 5, 0, 9], [7, 6, 2, 8, 0, 5, 1, 9, 4, 3], [3, 2, 1, 8, 4, 9, 7, 0, 6, 5], [1, 2, 7, 4, 6, 5, 0, 8, 3, 9], [4, 7, 5, 2, 8, 3, 0, 9, 1, 6], [5, 7, 2, 3, 8, 4, 9, 0, 6, 1], [0, 9, 1, 4, 5, 3, 8, 6, 2, 7], [2, 7, 0, 8, 9, 5, 6, 4, 3, 1], [5, 7, 4, 0, 3, 2, 8, 1, 9, 6], [3, 1, 5, 2, 7, 4, 8, 6, 0, 9], [2, 8, 0, 3, 7, 9, 1, 6, 4, 5], [1, 7, 5, 2, 6, 0, 3, 9, 8, 4], [7, 5, 0, 3, 8, 2, 6, 1, 4, 9], [4, 7, 0, 8, 2, 6, 3, 9, 1, 5], [1, 3, 2, 4, 0, 7, 9, 5, 8, 6], [1, 7, 2, 3, 0, 8, 5, 4, 6, 9], [7, 4, 2, 0, 8, 3, 9, 1, 6, 5], [4, 5, 7, 2, 0, 3, 8, 9, 1, 6], [3, 1, 7, 8, 6, 2, 9, 5, 4, 0], [0, 3, 5, 6, 7, 9, 2, 8, 4, 1], [4, 7, 2, 1, 5, 8, 0, 3, 9, 6], [7, 4, 5, 0, 2, 1, 8, 6, 3, 9], [7, 4, 2, 5, 3, 8, 6, 0, 1, 9], [7, 4, 0, 2, 5, 3, 1, 8, 6, 9], [7, 2, 0, 6, 9, 8, 4, 1, 3, 5], [1, 4, 3, 6, 2, 8, 9, 0, 7, 5], [0, 1, 7, 2, 3, 5, 8, 9, 6, 4], [5, 1, 4, 7, 8, 2, 6, 9, 3, 0], [0, 7, 2, 8, 1, 3, 6, 9, 5, 4], [0, 5, 7, 3, 4, 2, 9, 8, 6, 1], [2, 1, 8, 3, 6, 5, 4, 9, 7, 0], [5, 6, 3, 7, 2, 8, 0, 1, 9, 4], [8, 2, 3, 9, 6, 0, 7, 1, 4, 5], [5, 0, 1, 7, 4, 9, 6, 3, 2, 8], [7, 4, 5, 0, 2, 1, 6, 8, 3, 9], [7, 3, 2, 1, 5, 0, 4, 8, 9, 6], [1, 7, 6, 0, 2, 3, 4, 8, 5, 9], [2, 7, 1, 8, 4, 5, 3, 0, 6, 9], [4, 3, 1, 0, 5, 6, 8, 9, 7, 2], [0, 8, 2, 3, 7, 9, 5, 4, 6, 1], [2, 7, 5, 4, 0, 1, 3, 8, 6, 9], [0, 4, 5, 2, 1, 7, 3, 8, 9, 6], [4, 5, 1, 6, 7, 8, 3, 0, 2, 9], [7, 4, 0, 2, 5, 8, 6, 3, 9, 1], [7, 1, 2, 0, 5, 4, 6, 8, 3, 9], [7, 4, 5, 2, 1, 3, 0, 8, 6, 9], [7, 2, 5, 4, 0, 1, 3, 6, 8, 9], [5, 0, 7, 3, 1, 6, 8, 2, 9, 4], [7, 2, 0, 3, 8, 9, 6, 1, 5, 4], [0, 7, 5, 4, 2, 1, 8, 6, 3, 9], [3, 8, 2, 0, 6, 9, 7, 1, 4, 5], [2, 7, 1, 3, 0, 4, 8, 5, 6, 9], [7, 1, 0, 4, 3, 5, 8, 2, 9, 6], [4, 7, 2, 3, 6, 5, 8, 9, 0, 1], [7, 0, 6, 5, 1, 4, 2, 9, 8, 3], [2, 3, 8, 5, 0, 1, 6, 9, 7, 4], [7, 8, 2, 5, 6, 3, 0, 1, 4, 9], [2, 7, 1, 5, 8, 4, 3, 6, 0, 9], [4, 5, 0, 7, 2, 1, 6, 3, 8, 9], [3, 2, 8, 9, 7, 0, 4, 1, 5, 6], [1, 8, 2, 6, 0, 7, 5, 9, 4, 3], [0, 7, 1, 2, 6, 8, 5, 3, 9, 4], [4, 5, 7, 2, 8, 3, 1, 6, 0, 9], [7, 3, 0, 2, 8, 5, 1, 6, 9, 4], [4, 1, 7, 3, 0, 2, 8, 9, 5, 6], [4, 7, 8, 3, 5, 1, 2, 6, 0, 9], [5, 8, 9, 0, 1, 3, 2, 6, 4, 7], [5, 0, 9, 3, 6, 7, 2, 8, 1, 4], [7, 1, 2, 8, 5, 6, 0, 4, 3, 9], [2, 7, 3, 0, 6, 4, 8, 5, 9, 1], [2, 4, 7, 8, 3, 6, 5, 0, 9, 1], [0, 4, 7, 5, 2, 3, 8, 6, 9, 1], [0, 4, 1, 5, 3, 2, 7, 9, 6, 8], [1, 6, 7, 4, 5, 9, 3, 8, 2, 
0], [3, 7, 2, 0, 8, 4, 5, 9, 1, 6], [5, 0, 1, 3, 7, 2, 8, 9, 4, 6], [1, 2, 8, 5, 6, 7, 0, 3, 9, 4], [1, 5, 4, 2, 7, 6, 8, 0, 9, 3], [2, 7, 0, 6, 3, 8, 1, 9, 5, 4], [5, 7, 4, 2, 6, 8, 0, 1, 3, 9], [3, 7, 1, 9, 0, 6, 8, 2, 4, 5], [7, 4, 3, 6, 1, 5, 0, 2, 9, 8], [7, 4, 1, 0, 8, 5, 6, 2, 9, 3], [4, 7, 1, 2, 5, 8, 0, 3, 6, 9], [7, 5, 1, 8, 4, 2, 0, 3, 6, 9], [7, 2, 8, 1, 4, 0, 3, 5, 6, 9], [7, 2, 4, 5, 1, 0, 8, 3, 9, 6], [4, 1, 7, 2, 5, 6, 3, 0, 8, 9], [7, 2, 0, 1, 3, 6, 8, 9, 5, 4], [2, 7, 5, 0, 8, 1, 3, 6, 9, 4], [4, 7, 2, 5, 3, 6, 0, 8, 1, 9], [1, 3, 2, 6, 8, 5, 7, 4, 0, 9], [7, 5, 4, 8, 2, 1, 3, 0, 9, 6], [3, 8, 1, 5, 0, 2, 7, 6, 9, 4], [7, 1, 0, 4, 2, 5, 6, 3, 8, 9], [4, 7, 1, 6, 5, 0, 3, 8, 2, 9], [1, 7, 2, 0, 8, 3, 9, 6, 5, 4], [4, 7, 1, 5, 2, 0, 8, 6, 9, 3], [1, 5, 6, 2, 7, 0, 3, 9, 8, 4], [0, 1, 7, 3, 5, 2, 6, 8, 9, 4], [7, 2, 4, 5, 1, 3, 8, 0, 9, 6], [5, 1, 2, 7, 0, 6, 9, 8, 3, 4], [4, 5, 1, 7, 8, 2, 0, 6, 3, 9], [4, 6, 7, 2, 0, 5, 9, 1, 8, 3], [4, 7, 5, 1, 2, 8, 9, 3, 6, 0], [7, 2, 5, 8, 3, 1, 9, 6, 0, 4], [1, 2, 3, 9, 5, 8, 4, 7, 6, 0], [1, 6, 3, 0, 2, 8, 5, 9, 7, 4], [4, 5, 1, 3, 0, 7, 8, 2, 9, 6], [0, 3, 1, 6, 5, 9, 2, 8, 7, 4], [0, 1, 6, 9, 5, 7, 4, 8, 2, 3], [8, 1, 7, 4, 2, 5, 6, 0, 9, 3], [4, 5, 7, 1, 8, 0, 6, 2, 9, 3], [9, 3, 0, 6, 7, 2, 8, 4, 1, 5], [5, 7, 4, 9, 3, 1, 2, 0, 8, 6], [4, 7, 3, 6, 1, 9, 5, 8, 0, 2], [7, 5, 0, 4, 2, 1, 8, 3, 6, 9], [0, 6, 7, 1, 2, 8, 9, 3, 5, 4], [0, 1, 2, 7, 8, 9, 6, 3, 5, 4], [1, 0, 6, 9, 3, 5, 4, 8, 7, 2], [8, 3, 0, 7, 5, 1, 2, 9, 6, 4], [7, 3, 4, 2, 8, 1, 0, 6, 5, 9], [4, 7, 8, 2, 0, 9, 3, 1, 6, 5], [7, 2, 3, 0, 4, 8, 5, 6, 1, 9], [7, 1, 5, 2, 0, 8, 6, 9, 3, 4], [2, 8, 5, 6, 0, 9, 7, 4, 1, 3], [7, 3, 4, 5, 0, 2, 8, 9, 6, 1], [7, 1, 5, 8, 2, 3, 4, 9, 6, 0], [3, 2, 7, 8, 0, 9, 1, 5, 6, 4], [2, 8, 7, 0, 6, 9, 1, 5, 4, 3], [3, 7, 0, 5, 4, 1, 6, 8, 9, 2], [2, 0, 8, 3, 1, 7, 9, 6, 5, 4], [4, 0, 5, 7, 1, 3, 2, 8, 6, 9], [5, 8, 7, 2, 0, 1, 6, 9, 4, 3], [4, 5, 7, 1, 2, 0, 8, 3, 9, 6], [1, 5, 7, 2, 9, 8, 6, 0, 3, 4], [0, 1, 6, 3, 8, 7, 4, 2, 5, 9], [4, 1, 2, 0, 7, 8, 6, 3, 9, 5], [7, 2, 3, 8, 1, 5, 0, 9, 6, 4], [4, 5, 1, 7, 8, 0, 3, 2, 9, 6], [6, 0, 9, 5, 3, 1, 2, 8, 7, 4], [1, 3, 0, 6, 8, 4, 5, 2, 9, 7], [5, 2, 6, 4, 8, 9, 0, 7, 3, 1], [7, 5, 4, 1, 8, 2, 0, 3, 6, 9], [8, 4, 5, 3, 1, 2, 6, 9, 0, 7], [4, 7, 2, 0, 3, 8, 6, 9, 1, 5], [4, 7, 1, 2, 5, 0, 3, 8, 6, 9], [7, 4, 2, 1, 3, 0, 5, 6, 8, 9], [7, 2, 6, 8, 5, 0, 1, 4, 3, 9], [2, 7, 8, 0, 3, 1, 5, 4, 9, 6], [7, 5, 4, 3, 0, 1, 6, 2, 8, 9], [7, 2, 0, 8, 3, 1, 6, 5, 9, 4], [4, 5, 7, 2, 0, 1, 3, 8, 6, 9], [3, 7, 0, 6, 8, 2, 5, 9, 4, 1], [7, 4, 1, 2, 6, 0, 5, 3, 8, 9], [5, 7, 4, 2, 3, 0, 8, 1, 6, 9], [7, 1, 5, 2, 6, 0, 9, 8, 3, 4], [4, 2, 3, 7, 5, 1, 8, 0, 6, 9], [7, 4, 5, 8, 2, 6, 3, 0, 1, 9], [7, 8, 3, 6, 2, 5, 4, 0, 1, 9], [5, 7, 4, 1, 2, 0, 3, 9, 8, 6], [0, 4, 3, 6, 9, 8, 2, 7, 5, 1], [1, 5, 0, 3, 7, 8, 6, 9, 2, 4], [4, 3, 7, 5, 2, 0, 6, 1, 9, 8], [1, 4, 0, 5, 7, 6, 2, 8, 9, 3], [4, 7, 5, 0, 2, 8, 3, 1, 6, 9], [5, 2, 7, 6, 4, 8, 9, 3, 1, 0], [7, 6, 0, 2, 1, 8, 3, 9, 4, 5], [1, 9, 8, 7, 6, 2, 0, 3, 5, 4], [7, 5, 1, 8, 4, 3, 2, 0, 9, 6], [7, 3, 1, 0, 2, 6, 8, 9, 5, 4], [7, 4, 8, 2, 5, 3, 0, 1, 9, 6], [7, 0, 3, 1, 2, 8, 9, 6, 5, 4], [3, 6, 0, 9, 1, 5, 7, 8, 2, 4], [5, 1, 4, 3, 6, 7, 0, 9, 8, 2], [4, 7, 3, 0, 5, 1, 2, 8, 9, 6], [1, 2, 0, 7, 6, 9, 3, 5, 8, 4], [2, 8, 0, 6, 9, 5, 3, 1, 7, 4], [4, 5, 7, 1, 0, 3, 8, 2, 6, 9], [7, 1, 2, 5, 4, 6, 0, 8, 3, 9], [1, 7, 4, 5, 0, 3, 6, 8, 2, 9], [7, 4, 5, 0, 2, 1, 3, 8, 9, 6], [7, 1, 6, 0, 2, 4, 3, 9, 5, 8], [0, 7, 2, 8, 1, 9, 3, 6, 4, 5], [7, 1, 5, 3, 4, 2, 8, 0, 9, 
6], [2, 8, 5, 7, 0, 1, 9, 6, 3, 4], [9, 8, 7, 6, 2, 1, 0, 3, 5, 4], [7, 2, 0, 4, 5, 3, 1, 6, 8, 9], [4, 5, 7, 1, 2, 8, 6, 9, 0, 3], [7, 5, 0, 1, 6, 8, 2, 3, 9, 4], [5, 7, 4, 3, 8, 0, 6, 2, 1, 9], [7, 2, 6, 8, 9, 3, 1, 0, 5, 4], [4, 7, 0, 5, 2, 1, 6, 8, 3, 9], [5, 7, 3, 0, 8, 6, 9, 1, 2, 4], [6, 2, 9, 8, 3, 0, 7, 5, 1, 4], [0, 2, 7, 3, 5, 1, 6, 4, 9, 8], [0, 5, 6, 9, 3, 1, 8, 2, 7, 4], [5, 7, 2, 8, 0, 1, 9, 6, 3, 4], [4, 0, 2, 5, 1, 7, 3, 8, 6, 9], [1, 2, 5, 0, 7, 3, 9, 8, 6, 4], [1, 2, 3, 4, 5, 7, 0, 6, 8, 9], [7, 4, 5, 0, 2, 8, 1, 3, 6, 9], [5, 2, 0, 1, 8, 4, 9, 7, 3, 6], [4, 5, 7, 0, 2, 1, 8, 3, 9, 6], [2, 7, 3, 1, 5, 8, 4, 0, 6, 9], [5, 1, 2, 7, 6, 0, 3, 8, 9, 4], [4, 5, 0, 2, 3, 1, 7, 8, 6, 9], [7, 4, 5, 2, 0, 8, 1, 6, 3, 9], [0, 3, 5, 4, 9, 2, 8, 7, 6, 1], [4, 7, 2, 0, 1, 8, 9, 6, 5, 3], [7, 2, 8, 0, 6, 1, 9, 3, 5, 4], [7, 0, 2, 9, 8, 4, 6, 5, 3, 1], [2, 7, 8, 0, 5, 3, 4, 9, 6, 1], [7, 2, 8, 5, 0, 1, 6, 9, 3, 4], [7, 3, 0, 2, 8, 1, 4, 5, 9, 6], [4, 5, 1, 3, 6, 9, 0, 7, 8, 2], [5, 0, 1, 3, 7, 6, 2, 9, 8, 4], [2, 5, 8, 7, 9, 4, 0, 3, 6, 1], [7, 8, 0, 2, 5, 4, 9, 3, 1, 6], [4, 7, 0, 5, 8, 1, 3, 2, 9, 6], [7, 2, 0, 4, 5, 9, 3, 6, 1, 8], [7, 5, 3, 2, 1, 0, 8, 4, 6, 9], [8, 1, 2, 6, 5, 7, 4, 9, 3, 0], [7, 4, 2, 5, 8, 0, 9, 3, 1, 6], [7, 4, 5, 0, 3, 2, 1, 8, 9, 6], [4, 7, 2, 5, 8, 3, 6, 1, 0, 9], [7, 2, 3, 8, 9, 0, 6, 1, 4, 5], [4, 7, 1, 0, 5, 3, 2, 8, 9, 6], [5, 3, 7, 0, 2, 4, 8, 9, 6, 1], [5, 9, 4, 7, 1, 2, 8, 0, 6, 3], [7, 4, 3, 5, 8, 2, 9, 0, 1, 6], [3, 2, 7, 8, 0, 6, 9, 1, 5, 4], [7, 2, 0, 4, 6, 3, 9, 8, 5, 1], [4, 1, 7, 6, 5, 0, 3, 2, 8, 9], [0, 4, 3, 1, 2, 7, 6, 8, 9, 5], [3, 0, 5, 9, 8, 6, 7, 1, 2, 4], [6, 7, 2, 0, 8, 9, 1, 3, 5, 4], [7, 1, 2, 8, 4, 5, 6, 3, 9, 0], [2, 0, 9, 6, 3, 8, 5, 7, 1, 4], [0, 3, 6, 9, 8, 5, 1, 2, 4, 7], [1, 0, 4, 5, 2, 3, 6, 8, 7, 9], [7, 2, 0, 1, 8, 3, 4, 5, 6, 9], [6, 1, 0, 5, 3, 2, 8, 9, 7, 4], [5, 4, 6, 1, 7, 8, 3, 0, 2, 9], [4, 5, 0, 1, 7, 3, 2, 8, 9, 6], [1, 7, 2, 9, 6, 3, 8, 0, 5, 4], [7, 1, 5, 2, 0, 8, 3, 9, 6, 4], [7, 2, 1, 5, 0, 3, 6, 4, 8, 9], [5, 4, 0, 8, 1, 7, 2, 6, 9, 3], [0, 6, 7, 2, 8, 3, 1, 9, 5, 4], [1, 6, 0, 2, 9, 7, 8, 3, 4, 5], [0, 7, 2, 8, 3, 1, 6, 9, 5, 4], [7, 3, 2, 0, 8, 9, 6, 1, 5, 4], [5, 8, 2, 7, 6, 9, 1, 3, 0, 4], [7, 1, 6, 3, 9, 2, 4, 8, 5, 0], [0, 7, 4, 5, 2, 1, 8, 6, 9, 3], [7, 4, 9, 5, 1, 2, 3, 6, 0, 8], [5, 0, 4, 7, 9, 8, 2, 3, 1, 6], [2, 5, 7, 1, 8, 6, 9, 0, 3, 4], [7, 2, 0, 5, 4, 1, 8, 6, 3, 9], [1, 7, 8, 9, 2, 0, 3, 5, 4, 6], [7, 0, 2, 3, 8, 1, 4, 6, 9, 5], [4, 3, 2, 7, 0, 8, 9, 6, 5, 1], [7, 4, 0, 1, 6, 9, 3, 8, 2, 5], [7, 5, 4, 1, 0, 2, 8, 3, 9, 6], [7, 2, 4, 5, 8, 1, 0, 6, 3, 9], [2, 3, 4, 7, 5, 8, 1, 6, 0, 9], [7, 1, 5, 4, 2, 6, 8, 9, 0, 3], [0, 3, 2, 5, 4, 8, 7, 6, 9, 1], [4, 5, 7, 2, 1, 3, 6, 0, 8, 9], [4, 7, 2, 0, 5, 3, 8, 6, 9, 1], [4, 5, 0, 7, 2, 1, 6, 8, 3, 9], [7, 8, 2, 5, 9, 4, 3, 0, 6, 1], [7, 1, 4, 5, 8, 2, 9, 3, 0, 6], [1, 7, 4, 0, 5, 8, 2, 3, 6, 9], [4, 5, 0, 6, 2, 7, 8, 9, 1, 3], [6, 9, 7, 2, 8, 3, 0, 5, 1, 4], [7, 0, 4, 2, 3, 5, 1, 8, 9, 6], [1, 7, 4, 8, 5, 2, 6, 3, 0, 9], [7, 0, 2, 8, 3, 5, 9, 4, 1, 6], [7, 5, 4, 1, 8, 2, 9, 0, 6, 3], [4, 9, 3, 7, 8, 2, 0, 1, 5, 6], [7, 8, 5, 4, 1, 6, 0, 9, 2, 3], [7, 4, 5, 2, 1, 0, 3, 8, 6, 9], [6, 0, 2, 7, 1, 9, 3, 5, 4, 8], [4, 7, 6, 8, 3, 1, 0, 2, 9, 5], [7, 2, 8, 3, 0, 6, 1, 5, 4, 9], [4, 0, 3, 2, 7, 1, 8, 9, 5, 6], [1, 7, 2, 5, 6, 3, 8, 0, 4, 9], [2, 5, 0, 8, 6, 3, 1, 7, 4, 9], [6, 8, 2, 3, 7, 1, 4, 0, 9, 5], [2, 9, 8, 7, 3, 1, 6, 0, 5, 4], [7, 2, 4, 1, 0, 8, 3, 5, 9, 6], [1, 3, 7, 8, 2, 6, 4, 0, 5, 9], [7, 5, 2, 1, 4, 0, 6, 8, 3, 9], [7, 0, 9, 3, 8, 2, 5, 4, 6, 
1], [4, 7, 5, 8, 1, 0, 2, 3, 6, 9], [5, 7, 4, 3, 2, 0, 8, 9, 6, 1], [4, 7, 8, 2, 1, 0, 3, 5, 6, 9], [7, 1, 0, 2, 8, 4, 6, 3, 9, 5], [4, 9, 0, 7, 1, 8, 3, 2, 5, 6], [7, 4, 1, 5, 2, 3, 6, 8, 9, 0], [7, 3, 1, 9, 8, 2, 6, 0, 5, 4], [2, 7, 0, 3, 6, 1, 9, 5, 8, 4], [7, 1, 4, 8, 0, 5, 3, 6, 2, 9], [3, 5, 4, 1, 6, 2, 7, 0, 9, 8], [2, 7, 8, 9, 0, 1, 4, 5, 6, 3], [7, 2, 8, 1, 0, 5, 4, 6, 3, 9], [7, 2, 1, 5, 8, 3, 0, 6, 4, 9], [2, 7, 9, 3, 0, 8, 1, 6, 4, 5], [1, 4, 5, 2, 6, 9, 8, 0, 7, 3], [7, 2, 5, 3, 1, 8, 0, 6, 9, 4], [7, 1, 2, 3, 4, 0, 9, 6, 8, 5], [2, 8, 6, 9, 3, 0, 7, 5, 4, 1], [1, 4, 7, 2, 0, 8, 5, 6, 3, 9], [7, 4, 2, 5, 0, 8, 3, 1, 6, 9], [7, 2, 8, 5, 4, 0, 3, 1, 6, 9], [5, 1, 4, 0, 2, 3, 8, 6, 7, 9], [2, 7, 4, 8, 5, 1, 0, 6, 9, 3], [7, 4, 3, 5, 2, 8, 0, 1, 9, 6], [1, 7, 0, 4, 2, 6, 8, 5, 9, 3], [7, 4, 8, 2, 6, 1, 5, 9, 0, 3], [4, 3, 7, 8, 6, 9, 0, 2, 5, 1], [1, 4, 7, 5, 0, 2, 8, 6, 9, 3], [4, 7, 1, 2, 3, 0, 6, 8, 5, 9], [0, 1, 4, 7, 2, 5, 3, 9, 8, 6], [4, 2, 7, 1, 3, 0, 8, 6, 9, 5], [7, 2, 3, 0, 5, 4, 8, 1, 9, 6], [5, 7, 2, 4, 8, 1, 3, 0, 6, 9], [1, 7, 5, 4, 2, 3, 8, 9, 6, 0], [4, 5, 1, 0, 6, 3, 8, 2, 9, 7], [2, 6, 8, 7, 9, 4, 5, 1, 3, 0], [2, 1, 7, 0, 8, 3, 6, 9, 4, 5], [4, 7, 0, 5, 2, 1, 3, 8, 9, 6], [7, 0, 3, 2, 9, 8, 6, 1, 5, 4], [7, 2, 3, 0, 8, 9, 5, 6, 4, 1], [7, 2, 4, 1, 5, 8, 0, 6, 3, 9], [4, 5, 7, 6, 1, 2, 0, 9, 8, 3], [1, 0, 7, 2, 9, 3, 8, 6, 5, 4], [6, 9, 3, 0, 1, 8, 2, 7, 5, 4], [0, 2, 8, 7, 6, 9, 4, 1, 3, 5], [4, 5, 7, 1, 2, 3, 8, 6, 9, 0], [4, 7, 5, 2, 3, 8, 6, 9, 1, 0], [1, 4, 5, 7, 2, 8, 6, 0, 3, 9], [7, 2, 5, 0, 1, 3, 8, 6, 9, 4], [7, 8, 4, 2, 3, 0, 5, 1, 9, 6], [7, 5, 4, 1, 0, 2, 8, 3, 9, 6], [7, 2, 3, 0, 8, 5, 9, 6, 1, 4], [7, 6, 2, 3, 5, 4, 0, 8, 1, 9], [1, 7, 4, 2, 5, 0, 8, 6, 3, 9], [4, 0, 7, 1, 6, 2, 8, 5, 9, 3], [0, 2, 7, 3, 1, 6, 8, 5, 9, 4], [7, 1, 3, 2, 0, 8, 9, 6, 5, 4], [1, 6, 4, 2, 7, 5, 0, 3, 8, 9], [5, 7, 2, 4, 8, 1, 9, 0, 6, 3], [7, 4, 5, 0, 1, 2, 8, 3, 9, 6], [0, 2, 3, 1, 8, 5, 4, 6, 7, 9], [7, 2, 8, 1, 5, 0, 3, 4, 6, 9], [4, 2, 1, 7, 3, 0, 8, 9, 5, 6], [4, 5, 0, 3, 8, 6, 7, 9, 1, 2], [3, 0, 6, 9, 8, 7, 1, 2, 5, 4], [5, 0, 1, 7, 3, 2, 8, 6, 9, 4], [4, 1, 5, 7, 2, 3, 8, 6, 0, 9], [7, 8, 1, 2, 3, 5, 4, 0, 9, 6], [0, 5, 6, 8, 1, 4, 9, 3, 7, 2], [7, 2, 5, 4, 0, 8, 9, 3, 1, 6], [7, 4, 1, 6, 8, 2, 3, 0, 5, 9], [4, 7, 5, 2, 1, 3, 8, 6, 9, 0], [9, 1, 6, 0, 3, 5, 2, 7, 8, 4], [2, 4, 3, 5, 8, 1, 0, 7, 9, 6], [7, 0, 2, 3, 9, 1, 8, 5, 4, 6], [1, 5, 6, 8, 7, 9, 2, 4, 0, 3], [4, 7, 8, 3, 0, 1, 5, 6, 2, 9], [1, 3, 2, 8, 0, 6, 9, 5, 7, 4], [2, 4, 1, 6, 3, 0, 9, 8, 7, 5], [7, 1, 2, 4, 0, 8, 5, 3, 6, 9], [3, 8, 2, 7, 1, 6, 9, 5, 4, 0], [4, 7, 2, 5, 8, 0, 3, 6, 9, 1], [0, 6, 1, 8, 7, 2, 3, 9, 5, 4], [5, 7, 1, 0, 9, 2, 8, 3, 4, 6], [7, 4, 3, 0, 1, 2, 5, 6, 8, 9], [2, 1, 7, 8, 0, 6, 5, 3, 9, 4], [2, 5, 3, 7, 8, 4, 0, 6, 9, 1], [3, 1, 8, 0, 5, 9, 6, 2, 7, 4], [7, 1, 6, 4, 5, 2, 8, 3, 0, 9], [4, 5, 0, 1, 7, 3, 6, 2, 9, 8], [7, 4, 5, 0, 1, 3, 2, 8, 9, 6], [2, 1, 0, 7, 4, 6, 5, 3, 8, 9], [4, 5, 1, 7, 3, 2, 0, 9, 6, 8], [4, 1, 0, 5, 7, 6, 2, 3, 8, 9], [7, 4, 1, 5, 0, 6, 8, 2, 3, 9], [1, 7, 4, 0, 2, 3, 5, 9, 8, 6], [8, 7, 1, 4, 2, 5, 6, 0, 9, 3], [7, 5, 8, 4, 2, 0, 3, 9, 6, 1], [7, 2, 0, 5, 8, 3, 9, 6, 4, 1], [7, 4, 5, 6, 3, 2, 8, 1, 0, 9], [4, 7, 5, 1, 2, 0, 3, 8, 6, 9], [4, 5, 2, 7, 9, 1, 3, 8, 6, 0], [7, 0, 3, 8, 5, 2, 4, 1, 6, 9], [4, 1, 7, 5, 6, 2, 0, 8, 3, 9], [2, 4, 5, 7, 3, 1, 8, 6, 0, 9], [7, 5, 6, 1, 0, 2, 9, 8, 3, 4], [4, 7, 5, 2, 1, 8, 3, 6, 9, 0], [5, 6, 1, 0, 9, 4, 8, 7, 2, 3], [3, 4, 7, 0, 2, 8, 5, 9, 1, 6], [3, 2, 7, 1, 8, 4, 0, 9, 5, 6], [1, 6, 9, 0, 4, 7, 2, 8, 5, 
3], [7, 2, 0, 3, 1, 8, 5, 9, 6, 4], [4, 5, 0, 1, 3, 7, 6, 2, 9, 8], [7, 4, 0, 8, 1, 3, 5, 2, 9, 6], [4, 7, 0, 8, 9, 5, 3, 1, 2, 6], [7, 2, 0, 3, 8, 9, 4, 5, 1, 6], [4, 0, 7, 2, 1, 5, 8, 6, 3, 9], [7, 2, 5, 8, 0, 6, 9, 3, 1, 4], [4, 7, 2, 5, 6, 8, 1, 9, 3, 0], [4, 5, 7, 1, 0, 9, 2, 8, 3, 6], [7, 2, 8, 9, 1, 5, 6, 0, 4, 3], [5, 4, 7, 0, 2, 3, 1, 9, 8, 6], [7, 0, 2, 5, 4, 3, 8, 1, 9, 6], [7, 1, 8, 6, 2, 3, 0, 9, 5, 4], [7, 4, 5, 2, 3, 1, 0, 8, 9, 6], [0, 8, 2, 7, 9, 3, 6, 1, 4, 5], [4, 5, 1, 3, 0, 2, 7, 8, 6, 9], [5, 7, 2, 6, 9, 8, 0, 4, 3, 1], [0, 5, 2, 3, 7, 8, 4, 1, 9, 6], [7, 3, 1, 5, 0, 2, 8, 4, 6, 9], [1, 5, 6, 9, 8, 7, 0, 2, 4, 3], [0, 5, 3, 6, 4, 9, 1, 8, 7, 2], [0, 6, 7, 2, 5, 8, 4, 9, 3, 1], [7, 0, 5, 1, 2, 8, 6, 3, 9, 4], [7, 5, 1, 0, 2, 3, 4, 6, 8, 9], [7, 2, 1, 8, 6, 0, 5, 3, 9, 4], [1, 4, 7, 0, 5, 3, 2, 8, 6, 9], [4, 7, 1, 0, 2, 6, 5, 3, 8, 9], [1, 4, 6, 2, 7, 8, 5, 3, 0, 9], [3, 0, 6, 2, 5, 4, 8, 9, 1, 7], [7, 8, 0, 2, 6, 3, 5, 1, 9, 4], [2, 1, 0, 3, 8, 4, 6, 9, 5, 7], [1, 7, 2, 8, 4, 5, 3, 0, 6, 9], [2, 1, 7, 5, 8, 0, 3, 6, 4, 9], [1, 7, 8, 2, 3, 9, 4, 5, 6, 0], [5, 4, 1, 2, 3, 8, 7, 0, 6, 9], [7, 2, 0, 3, 8, 4, 6, 9, 1, 5], [0, 3, 7, 6, 8, 2, 5, 1, 9, 4], [1, 7, 2, 5, 8, 0, 6, 9, 4, 3], [4, 5, 0, 1, 2, 3, 9, 6, 8, 7], [7, 4, 1, 5, 0, 8, 2, 6, 3, 9], [5, 4, 0, 6, 2, 1, 7, 3, 8, 9], [1, 4, 5, 0, 7, 2, 3, 9, 8, 6], [7, 2, 5, 3, 0, 1, 8, 6, 9, 4], [7, 5, 6, 4, 2, 1, 3, 0, 8, 9], [2, 8, 3, 0, 6, 7, 5, 9, 1, 4], [7, 2, 3, 6, 0, 1, 8, 9, 5, 4], [2, 3, 5, 8, 7, 1, 4, 0, 6, 9], [0, 7, 2, 6, 8, 3, 1, 4, 9, 5], [4, 0, 3, 7, 1, 5, 6, 8, 9, 2], [7, 3, 4, 2, 5, 0, 1, 8, 9, 6], [7, 2, 5, 3, 0, 1, 4, 8, 6, 9], [2, 8, 7, 1, 9, 6, 3, 0, 5, 4], [5, 6, 1, 9, 3, 0, 4, 2, 8, 7], [7, 5, 1, 2, 8, 3, 6, 4, 9, 0], [7, 5, 0, 9, 8, 2, 1, 6, 3, 4], [2, 7, 1, 0, 8, 6, 3, 9, 4, 5], [2, 7, 0, 8, 5, 3, 1, 6, 9, 4]]
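# Each row below is a permutation of the digits 0-9, in the same format as the
# list that ends above. Judging by its name, sort_test appears to be a
# held-out evaluation split for a sorting task; that reading is an assumption,
# since the file carries no other documentation. A quick sanity check after
# importing this module (hypothetical usage):
#     assert all(sorted(row) == list(range(10)) for row in sort_test)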
sort_test = [[0, 1, 3, 2, 8, 9, 7, 6, 4, 5], [8, 1, 6, 5, 3, 0, 2, 7, 9, 4], [5, 7, 2, 4, 0, 3, 8, 1, 6, 9], [5, 4, 7, 0, 2, 8, 3, 1, 9, 6], [1, 7, 3, 2, 6, 8, 0, 9, 5, 4], [1, 7, 0, 2, 9, 3, 8, 6, 5, 4], [4, 7, 0, 2, 1, 8, 6, 9, 3, 5], [7, 0, 2, 3, 9, 6, 8, 4, 5, 1], [7, 0, 4, 5, 2, 8, 1, 3, 6, 9], [5, 7, 0, 4, 2, 8, 3, 1, 9, 6], [7, 1, 5, 0, 4, 2, 9, 8, 3, 6], [4, 7, 2, 5, 1, 8, 0, 9, 3, 6], [5, 4, 7, 2, 8, 9, 3, 1, 0, 6], [7, 4, 2, 3, 8, 6, 5, 9, 0, 1], [7, 1, 8, 2, 0, 9, 5, 6, 4, 3], [1, 4, 7, 5, 0, 3, 2, 8, 6, 9], [7, 3, 0, 6, 8, 9, 2, 5, 1, 4], [7, 5, 4, 2, 3, 0, 6, 8, 9, 1], [4, 5, 8, 2, 7, 3, 9, 6, 0, 1], [3, 1, 7, 8, 6, 2, 4, 9, 5, 0], [5, 4, 7, 0, 1, 3, 6, 2, 9, 8], [1, 7, 3, 6, 0, 2, 8, 9, 5, 4], [7, 0, 8, 5, 6, 2, 3, 4, 9, 1], [3, 5, 7, 0, 4, 9, 8, 2, 1, 6], [7, 0, 8, 2, 5, 3, 1, 6, 9, 4], [0, 2, 4, 7, 5, 1, 8, 3, 9, 6], [2, 5, 7, 0, 6, 8, 9, 3, 4, 1], [2, 7, 0, 8, 9, 3, 5, 1, 4, 6], [8, 2, 7, 5, 0, 3, 6, 1, 4, 9], [7, 1, 3, 0, 2, 8, 9, 6, 4, 5], [7, 2, 1, 5, 4, 3, 6, 8, 0, 9], [6, 2, 0, 8, 7, 3, 1, 9, 5, 4], [0, 5, 4, 1, 3, 7, 6, 8, 9, 2], [7, 4, 1, 3, 5, 2, 8, 9, 6, 0], [7, 2, 0, 8, 9, 4, 3, 1, 5, 6], [2, 5, 8, 7, 0, 1, 9, 3, 6, 4], [1, 6, 5, 3, 4, 8, 7, 2, 0, 9], [1, 3, 7, 2, 6, 0, 8, 9, 5, 4], [2, 6, 7, 1, 0, 3, 8, 9, 4, 5], [7, 4, 1, 0, 5, 6, 8, 3, 2, 9], [5, 4, 7, 2, 1, 3, 0, 9, 6, 8], [6, 0, 3, 5, 8, 4, 9, 1, 2, 7], [7, 2, 8, 5, 3, 0, 4, 1, 6, 9], [4, 5, 7, 0, 1, 8, 3, 2, 6, 9], [6, 1, 7, 2, 0, 3, 5, 4, 8, 9], [4, 3, 0, 1, 6, 9, 7, 8, 2, 5], [0, 4, 8, 7, 3, 2, 6, 9, 5, 1], [9, 1, 2, 6, 8, 7, 3, 4, 0, 5], [1, 8, 6, 0, 3, 2, 9, 7, 5, 4], [2, 7, 8, 5, 1, 4, 0, 3, 9, 6], [7, 3, 8, 9, 6, 2, 5, 4, 0, 1], [7, 6, 2, 1, 5, 4, 0, 8, 3, 9], [7, 4, 0, 3, 1, 2, 5, 8, 9, 6], [7, 5, 1, 2, 8, 6, 3, 9, 4, 0], [7, 2, 5, 4, 3, 0, 8, 6, 1, 9], [1, 7, 2, 6, 0, 8, 5, 3, 4, 9], [4, 6, 1, 3, 7, 0, 2, 8, 5, 9], [7, 4, 5, 0, 2, 8, 3, 9, 1, 6], [6, 7, 0, 1, 4, 2, 5, 3, 8, 9], [2, 7, 0, 3, 8, 1, 4, 5, 6, 9], [0, 2, 8, 6, 1, 7, 4, 5, 9, 3], [7, 2, 1, 4, 0, 5, 8, 3, 9, 6], [7, 9, 1, 8, 4, 2, 5, 3, 6, 0], [1, 4, 8, 7, 0, 9, 2, 3, 5, 6], [8, 7, 0, 2, 6, 4, 5, 3, 1, 9], [8, 7, 4, 5, 6, 2, 3, 0, 9, 1], [7, 3, 0, 2, 8, 4, 9, 5, 6, 1], [4, 5, 7, 2, 0, 3, 1, 8, 9, 6], [4, 7, 5, 0, 6, 1, 2, 8, 3, 9], [4, 7, 5, 0, 3, 2, 8, 1, 6, 9], [4, 7, 2, 5, 0, 8, 9, 3, 6, 1], [0, 2, 7, 1, 6, 8, 5, 3, 9, 4], [7, 2, 8, 1, 3, 6, 0, 9, 5, 4], [7, 4, 2, 3, 0, 8, 5, 9, 1, 6], [2, 8, 0, 6, 7, 1, 5, 9, 3, 4], [7, 4, 5, 1, 2, 3, 8, 9, 6, 0], [1, 7, 2, 6, 8, 9, 5, 0, 3, 4], [7, 5, 2, 0, 3, 6, 4, 8, 9, 1], [7, 2, 6, 8, 3, 1, 0, 9, 5, 4], [4, 5, 0, 7, 8, 2, 9, 1, 3, 6], [5, 4, 7, 3, 2, 0, 1, 6, 8, 9], [7, 4, 1, 5, 0, 6, 3, 2, 8, 9], [0, 7, 1, 5, 4, 8, 9, 2, 6, 3], [5, 7, 2, 1, 8, 0, 4, 9, 6, 3], [2, 5, 0, 4, 7, 6, 3, 1, 8, 9], [4, 7, 5, 0, 2, 1, 3, 6, 8, 9], [1, 8, 6, 2, 3, 9, 5, 7, 0, 4], [7, 4, 0, 2, 5, 3, 8, 1, 9, 6], [2, 1, 3, 8, 7, 5, 9, 4, 6, 0], [1, 0, 6, 3, 7, 9, 2, 8, 5, 4], [1, 7, 6, 5, 4, 2, 8, 0, 3, 9], [5, 1, 2, 6, 7, 8, 9, 0, 3, 4], [7, 0, 3, 2, 6, 8, 1, 5, 9, 4], [4, 7, 5, 1, 0, 6, 8, 2, 9, 3], [0, 1, 6, 4, 9, 5, 3, 7, 8, 2], [1, 7, 0, 5, 3, 8, 2, 9, 6, 4], [7, 0, 2, 3, 6, 8, 5, 9, 1, 4], [7, 1, 4, 8, 2, 6, 0, 9, 5, 3], [7, 8, 1, 2, 3, 0, 4, 5, 9, 6], [4, 7, 1, 0, 5, 6, 2, 8, 3, 9], [1, 7, 2, 8, 5, 0, 6, 9, 4, 3], [2, 7, 3, 4, 0, 5, 8, 9, 1, 6], [0, 1, 5, 6, 2, 9, 8, 4, 3, 7], [7, 8, 9, 2, 0, 6, 1, 4, 3, 5], [6, 7, 1, 0, 8, 3, 2, 9, 5, 4], [1, 4, 0, 3, 6, 9, 5, 8, 2, 7], [7, 5, 4, 1, 8, 2, 3, 6, 0, 9], [3, 1, 7, 0, 4, 2, 5, 9, 8, 6], [5, 7, 0, 2, 3, 1, 8, 6, 9, 4], [1, 2, 0, 7, 3, 6, 8, 9, 5, 4], [1, 5, 7, 2, 6, 8, 0, 
3, 9, 4], [0, 7, 1, 3, 6, 5, 9, 8, 2, 4], [7, 2, 3, 0, 5, 4, 1, 8, 9, 6], [6, 2, 7, 3, 5, 4, 9, 8, 0, 1], [7, 2, 5, 6, 9, 4, 0, 1, 8, 3], [8, 7, 2, 3, 0, 6, 1, 9, 4, 5], [1, 5, 7, 8, 6, 0, 3, 2, 9, 4], [7, 2, 1, 0, 6, 5, 3, 8, 9, 4], [4, 1, 7, 0, 5, 3, 8, 2, 6, 9], [5, 4, 7, 3, 8, 2, 1, 6, 0, 9], [1, 7, 2, 0, 6, 3, 8, 9, 5, 4], [4, 7, 5, 3, 8, 2, 0, 6, 9, 1], [7, 2, 8, 9, 5, 1, 0, 6, 3, 4], [7, 5, 2, 8, 9, 0, 6, 3, 1, 4], [7, 4, 5, 8, 1, 2, 6, 3, 9, 0], [7, 2, 4, 8, 1, 3, 5, 9, 0, 6], [7, 4, 2, 0, 3, 1, 9, 8, 6, 5], [0, 6, 3, 5, 9, 1, 7, 8, 2, 4], [5, 7, 9, 1, 0, 4, 8, 6, 2, 3], [4, 7, 0, 1, 5, 2, 3, 8, 6, 9], [7, 3, 6, 8, 1, 5, 0, 2, 4, 9], [2, 3, 0, 8, 7, 1, 6, 9, 4, 5], [5, 7, 4, 6, 1, 2, 0, 3, 8, 9], [8, 0, 5, 2, 4, 6, 3, 7, 1, 9], [1, 4, 7, 5, 9, 0, 2, 6, 8, 3], [1, 7, 2, 6, 4, 8, 5, 0, 3, 9], [6, 9, 8, 7, 4, 5, 2, 1, 3, 0], [7, 4, 8, 2, 0, 5, 9, 3, 6, 1], [7, 0, 3, 5, 4, 1, 2, 8, 6, 9], [7, 5, 4, 6, 1, 0, 9, 8, 2, 3], [2, 7, 5, 4, 3, 0, 1, 8, 6, 9], [4, 0, 8, 2, 1, 7, 6, 5, 9, 3], [4, 0, 3, 2, 1, 7, 8, 9, 6, 5], [1, 4, 7, 5, 2, 0, 8, 3, 6, 9], [4, 5, 7, 2, 3, 0, 1, 6, 8, 9], [9, 0, 7, 8, 4, 2, 5, 3, 1, 6], [1, 4, 5, 7, 9, 8, 2, 0, 6, 3], [0, 4, 1, 3, 7, 2, 9, 5, 8, 6], [4, 2, 3, 7, 1, 8, 9, 5, 6, 0], [3, 7, 2, 8, 6, 1, 0, 9, 4, 5], [0, 6, 1, 7, 2, 8, 3, 5, 4, 9], [7, 2, 0, 5, 3, 4, 8, 1, 6, 9], [7, 6, 2, 4, 5, 8, 9, 0, 3, 1], [7, 5, 4, 1, 2, 0, 3, 6, 8, 9], [7, 4, 5, 6, 2, 0, 3, 1, 8, 9], [7, 2, 1, 6, 8, 5, 0, 4, 3, 9], [5, 4, 1, 7, 0, 8, 6, 2, 3, 9], [2, 0, 8, 3, 1, 6, 9, 7, 5, 4], [7, 1, 5, 2, 8, 0, 3, 6, 9, 4], [7, 8, 9, 6, 2, 5, 3, 0, 1, 4], [4, 0, 5, 7, 3, 1, 2, 8, 9, 6], [3, 4, 1, 0, 6, 9, 8, 7, 2, 5], [5, 6, 1, 8, 0, 7, 9, 2, 3, 4], [2, 7, 3, 0, 8, 9, 5, 4, 6, 1], [5, 1, 7, 3, 0, 4, 2, 9, 8, 6], [5, 1, 4, 0, 7, 2, 3, 8, 6, 9], [8, 7, 0, 3, 2, 1, 6, 9, 4, 5], [7, 2, 9, 6, 1, 8, 0, 5, 3, 4], [7, 4, 0, 5, 3, 1, 2, 6, 8, 9], [4, 2, 0, 9, 1, 6, 3, 7, 8, 5], [4, 3, 9, 2, 8, 1, 5, 0, 6, 7], [4, 0, 5, 1, 6, 2, 7, 8, 9, 3], [5, 2, 7, 8, 0, 4, 3, 1, 9, 6], [7, 8, 2, 4, 0, 1, 5, 3, 9, 6], [7, 2, 5, 1, 3, 4, 9, 0, 8, 6], [4, 0, 5, 2, 9, 8, 6, 1, 3, 7], [3, 7, 2, 5, 1, 9, 0, 8, 6, 4], [1, 0, 2, 3, 7, 9, 6, 8, 5, 4], [4, 7, 2, 5, 1, 0, 8, 3, 6, 9], [0, 2, 4, 6, 5, 9, 7, 3, 1, 8], [0, 6, 5, 7, 1, 4, 2, 8, 3, 9], [2, 7, 8, 0, 4, 6, 5, 9, 1, 3], [8, 6, 5, 9, 2, 7, 1, 3, 0, 4], [3, 0, 5, 1, 8, 6, 7, 4, 9, 2], [6, 2, 0, 7, 8, 3, 1, 9, 4, 5], [5, 8, 1, 2, 0, 3, 7, 9, 4, 6], [5, 7, 2, 8, 4, 1, 3, 6, 0, 9], [6, 0, 7, 2, 8, 3, 1, 4, 9, 5], [1, 3, 9, 4, 0, 2, 6, 5, 7, 8], [2, 7, 1, 0, 5, 8, 4, 6, 9, 3], [5, 1, 7, 2, 6, 9, 8, 3, 0, 4], [4, 5, 2, 7, 1, 0, 6, 9, 8, 3], [7, 0, 1, 3, 4, 2, 9, 5, 8, 6], [6, 7, 5, 1, 2, 9, 3, 8, 4, 0], [4, 2, 3, 6, 8, 0, 7, 5, 1, 9], [1, 7, 2, 8, 0, 6, 3, 9, 5, 4], [7, 5, 2, 3, 1, 8, 0, 6, 4, 9], [7, 4, 1, 6, 0, 8, 5, 9, 2, 3], [7, 4, 2, 1, 5, 3, 0, 8, 9, 6], [7, 5, 2, 8, 1, 6, 9, 0, 3, 4], [5, 3, 4, 8, 1, 9, 6, 2, 0, 7], [7, 2, 1, 8, 6, 0, 9, 3, 4, 5], [7, 0, 2, 3, 1, 8, 9, 6, 4, 5], [0, 2, 8, 7, 5, 4, 9, 3, 6, 1], [7, 0, 1, 6, 3, 2, 8, 9, 4, 5], [7, 1, 5, 4, 8, 2, 3, 6, 0, 9], [5, 2, 7, 3, 6, 0, 9, 1, 4, 8], [1, 3, 6, 7, 9, 2, 8, 5, 4, 0], [4, 7, 0, 1, 5, 3, 8, 6, 9, 2], [7, 5, 2, 4, 0, 3, 8, 6, 1, 9], [3, 0, 4, 5, 7, 8, 2, 9, 6, 1], [1, 7, 2, 4, 3, 0, 5, 6, 8, 9], [7, 4, 2, 5, 3, 6, 8, 9, 1, 0], [5, 7, 4, 0, 1, 6, 8, 2, 9, 3], [7, 2, 0, 4, 5, 1, 8, 9, 3, 6], [1, 0, 2, 8, 4, 7, 5, 3, 6, 9], [0, 5, 1, 7, 3, 4, 6, 2, 9, 8], [4, 7, 2, 6, 8, 3, 0, 5, 9, 1], [1, 5, 0, 7, 3, 2, 9, 6, 8, 4], [3, 1, 5, 7, 4, 9, 8, 2, 6, 0], [2, 8, 7, 1, 6, 4, 5, 0, 3, 9], [7, 1, 6, 0, 8, 3, 2, 4, 
5, 9], [7, 8, 1, 4, 3, 2, 0, 5, 9, 6], [5, 4, 7, 1, 0, 2, 8, 9, 3, 6], [7, 2, 5, 4, 3, 1, 8, 9, 0, 6], [3, 5, 0, 4, 7, 2, 1, 8, 9, 6], [1, 5, 9, 4, 7, 0, 2, 8, 6, 3], [5, 0, 1, 7, 8, 4, 6, 3, 2, 9], [7, 1, 4, 6, 0, 2, 5, 9, 8, 3], [4, 5, 7, 2, 3, 1, 0, 8, 9, 6], [7, 2, 5, 0, 8, 4, 3, 1, 6, 9], [7, 2, 0, 1, 4, 3, 5, 8, 6, 9], [5, 6, 0, 9, 7, 1, 8, 2, 3, 4], [5, 0, 8, 3, 9, 2, 4, 1, 6, 7], [7, 2, 3, 8, 0, 1, 5, 4, 6, 9], [4, 5, 1, 6, 7, 3, 8, 9, 2, 0], [3, 7, 2, 6, 5, 0, 9, 1, 8, 4], [1, 3, 7, 2, 5, 0, 8, 9, 6, 4], [3, 5, 4, 7, 6, 8, 2, 9, 0, 1], [0, 2, 7, 8, 1, 3, 5, 4, 9, 6], [0, 6, 8, 3, 2, 7, 9, 1, 5, 4], [4, 7, 2, 3, 0, 8, 5, 1, 9, 6], [8, 6, 1, 0, 9, 3, 2, 7, 4, 5], [0, 5, 6, 9, 1, 8, 3, 7, 2, 4], [5, 1, 2, 7, 3, 9, 6, 8, 4, 0], [7, 5, 3, 8, 9, 4, 1, 2, 0, 6], [6, 1, 9, 0, 8, 2, 4, 5, 3, 7], [0, 7, 2, 8, 4, 3, 1, 6, 5, 9], [4, 5, 7, 1, 0, 8, 2, 9, 3, 6], [1, 5, 7, 2, 6, 3, 0, 8, 4, 9], [7, 2, 0, 1, 5, 8, 3, 6, 4, 9], [4, 7, 6, 0, 2, 5, 8, 3, 1, 9], [7, 1, 0, 2, 3, 4, 5, 8, 6, 9], [4, 5, 7, 2, 0, 3, 8, 1, 6, 9], [3, 0, 2, 7, 9, 4, 5, 8, 1, 6], [7, 5, 4, 0, 1, 3, 2, 8, 9, 6], [4, 1, 7, 0, 5, 3, 2, 8, 6, 9], [4, 2, 7, 5, 1, 0, 3, 8, 9, 6], [4, 5, 8, 7, 2, 0, 1, 3, 9, 6], [6, 2, 0, 1, 8, 3, 9, 5, 7, 4], [1, 7, 4, 2, 0, 3, 8, 5, 9, 6], [7, 4, 5, 8, 2, 3, 0, 9, 1, 6], [7, 8, 6, 0, 3, 5, 2, 1, 9, 4], [1, 4, 7, 0, 2, 6, 3, 8, 9, 5], [2, 7, 4, 5, 8, 3, 0, 1, 6, 9], [2, 3, 7, 0, 1, 5, 4, 8, 6, 9], [7, 2, 3, 5, 4, 1, 0, 8, 6, 9], [2, 8, 1, 7, 6, 3, 0, 9, 5, 4], [7, 3, 2, 8, 0, 9, 6, 1, 4, 5], [5, 2, 7, 0, 1, 8, 9, 3, 4, 6], [1, 0, 2, 5, 7, 4, 3, 9, 8, 6], [7, 2, 3, 6, 1, 8, 0, 5, 9, 4], [1, 3, 8, 4, 7, 2, 0, 6, 9, 5], [7, 1, 4, 0, 2, 8, 5, 6, 3, 9], [7, 2, 0, 8, 1, 5, 6, 3, 4, 9], [3, 7, 2, 0, 4, 8, 1, 9, 5, 6], [7, 8, 1, 4, 2, 5, 0, 3, 6, 9], [7, 2, 3, 0, 4, 1, 5, 8, 9, 6], [7, 1, 6, 0, 3, 2, 8, 4, 5, 9], [7, 0, 2, 4, 1, 3, 8, 5, 6, 9], [7, 2, 0, 8, 3, 5, 4, 9, 6, 1], [7, 8, 2, 1, 6, 0, 3, 9, 5, 4], [2, 7, 6, 8, 9, 3, 0, 1, 4, 5], [2, 6, 8, 7, 0, 9, 3, 1, 5, 4], [7, 2, 5, 8, 6, 0, 3, 1, 9, 4], [5, 0, 7, 2, 3, 4, 1, 6, 8, 9], [3, 0, 2, 7, 4, 1, 5, 8, 9, 6], [7, 5, 4, 1, 0, 2, 6, 8, 3, 9], [5, 4, 7, 0, 1, 3, 8, 2, 6, 9], [3, 4, 7, 5, 8, 1, 2, 0, 6, 9], [2, 7, 8, 1, 0, 6, 3, 9, 4, 5], [7, 8, 5, 2, 3, 6, 0, 9, 1, 4], [5, 8, 7, 2, 3, 0, 6, 9, 1, 4], [7, 1, 4, 5, 3, 9, 2, 6, 0, 8], [4, 7, 0, 5, 2, 8, 3, 9, 6, 1], [1, 8, 6, 7, 2, 5, 0, 9, 4, 3], [7, 5, 4, 8, 3, 1, 2, 0, 9, 6], [4, 5, 1, 7, 3, 6, 9, 0, 8, 2], [1, 3, 0, 2, 6, 7, 5, 8, 4, 9], [1, 0, 7, 5, 3, 2, 8, 9, 6, 4], [2, 0, 3, 7, 5, 4, 8, 1, 6, 9], [7, 8, 0, 4, 5, 3, 6, 2, 1, 9], [2, 3, 0, 1, 8, 6, 7, 4, 9, 5], [2, 3, 8, 0, 7, 4, 1, 5, 6, 9], [3, 5, 4, 6, 0, 7, 9, 8, 1, 2], [7, 2, 5, 0, 6, 8, 1, 9, 3, 4], [2, 0, 8, 3, 1, 6, 7, 9, 4, 5], [5, 7, 2, 3, 1, 8, 0, 4, 6, 9], [4, 7, 1, 2, 6, 0, 3, 5, 8, 9], [4, 1, 5, 7, 3, 8, 2, 0, 6, 9], [6, 1, 3, 7, 5, 4, 0, 9, 8, 2], [7, 2, 3, 0, 1, 8, 5, 4, 6, 9], [5, 0, 7, 2, 1, 4, 8, 3, 6, 9], [0, 2, 7, 8, 3, 6, 1, 4, 9, 5], [7, 0, 5, 6, 1, 2, 8, 9, 3, 4], [4, 7, 6, 2, 5, 3, 0, 9, 8, 1], [4, 7, 2, 8, 5, 3, 0, 1, 9, 6], [2, 5, 3, 4, 6, 7, 0, 1, 8, 9], [7, 4, 0, 1, 8, 3, 2, 9, 6, 5], [4, 3, 9, 6, 8, 2, 7, 0, 1, 5], [5, 4, 7, 2, 3, 0, 8, 1, 9, 6], [4, 1, 0, 3, 7, 2, 6, 5, 8, 9], [6, 0, 7, 2, 8, 9, 1, 3, 4, 5], [7, 4, 2, 6, 0, 5, 3, 8, 1, 9], [2, 7, 0, 4, 5, 1, 3, 6, 8, 9], [1, 2, 6, 8, 4, 5, 9, 7, 3, 0], [7, 2, 1, 0, 3, 8, 6, 5, 9, 4], [4, 5, 7, 1, 2, 8, 0, 3, 6, 9], [4, 2, 5, 7, 6, 8, 1, 0, 9, 3], [4, 0, 7, 3, 8, 1, 2, 6, 9, 5], [1, 0, 4, 6, 3, 9, 8, 5, 7, 2], [7, 2, 5, 8, 0, 4, 1, 3, 6, 9], [7, 2, 8, 3, 9, 0, 6, 5, 4, 
1], [7, 0, 9, 5, 2, 6, 3, 1, 8, 4], [2, 3, 9, 5, 8, 7, 0, 1, 6, 4], [7, 1, 2, 3, 5, 6, 4, 9, 0, 8], [7, 4, 1, 0, 6, 3, 5, 2, 8, 9], [0, 2, 8, 5, 7, 9, 6, 3, 1, 4], [4, 0, 5, 2, 7, 8, 3, 9, 6, 1], [6, 7, 1, 2, 5, 3, 0, 4, 8, 9], [2, 7, 1, 0, 8, 6, 5, 4, 3, 9], [7, 0, 4, 8, 2, 1, 3, 5, 6, 9], [5, 7, 3, 9, 1, 0, 6, 8, 2, 4], [8, 2, 6, 0, 5, 4, 9, 3, 1, 7], [1, 0, 7, 3, 4, 8, 2, 6, 5, 9], [7, 1, 0, 8, 2, 6, 5, 3, 4, 9], [5, 4, 7, 2, 8, 3, 0, 1, 6, 9], [7, 1, 4, 5, 2, 0, 3, 8, 6, 9], [1, 4, 5, 7, 8, 2, 6, 3, 0, 9], [6, 2, 8, 3, 0, 1, 9, 7, 5, 4], [7, 1, 2, 4, 5, 0, 8, 6, 3, 9], [4, 5, 0, 3, 6, 2, 7, 9, 8, 1], [7, 2, 8, 0, 3, 1, 4, 6, 9, 5], [7, 5, 8, 2, 3, 0, 4, 1, 9, 6], [7, 2, 8, 4, 0, 3, 9, 6, 1, 5], [4, 0, 7, 5, 8, 3, 6, 9, 2, 1], [5, 4, 7, 8, 1, 2, 0, 6, 9, 3], [0, 1, 3, 5, 4, 9, 8, 6, 2, 7], [7, 3, 2, 6, 0, 8, 1, 9, 5, 4], [1, 7, 8, 5, 4, 2, 6, 9, 0, 3], [7, 8, 1, 4, 9, 2, 5, 3, 0, 6], [4, 7, 2, 5, 1, 8, 6, 9, 0, 3], [7, 6, 1, 4, 5, 8, 2, 0, 9, 3], [4, 5, 1, 7, 0, 3, 8, 2, 9, 6], [8, 7, 2, 6, 3, 0, 1, 9, 5, 4], [1, 6, 0, 7, 2, 9, 8, 5, 3, 4], [4, 7, 3, 5, 8, 2, 1, 0, 9, 6], [1, 7, 6, 5, 8, 2, 9, 0, 3, 4], [4, 7, 1, 5, 6, 9, 2, 8, 3, 0], [5, 2, 7, 8, 0, 3, 9, 1, 6, 4], [4, 5, 0, 1, 7, 3, 2, 8, 9, 6], [2, 1, 5, 7, 8, 0, 6, 3, 9, 4], [7, 8, 4, 2, 9, 3, 5, 0, 6, 1], [4, 5, 3, 1, 7, 0, 9, 8, 6, 2], [7, 2, 4, 0, 5, 1, 6, 3, 8, 9], [7, 2, 8, 5, 0, 4, 1, 3, 9, 6], [0, 3, 6, 8, 2, 7, 9, 4, 1, 5], [4, 0, 5, 7, 2, 8, 1, 6, 9, 3], [4, 2, 0, 7, 3, 8, 1, 9, 6, 5], [7, 4, 5, 1, 8, 2, 0, 3, 6, 9], [7, 0, 2, 3, 8, 1, 4, 5, 6, 9], [1, 7, 2, 5, 3, 0, 4, 8, 9, 6], [7, 1, 2, 3, 0, 4, 8, 6, 5, 9], [7, 1, 4, 2, 8, 0, 3, 6, 5, 9], [7, 8, 6, 5, 4, 3, 1, 0, 2, 9], [6, 4, 1, 0, 5, 7, 3, 2, 8, 9], [6, 7, 5, 0, 2, 1, 3, 8, 4, 9], [0, 7, 2, 1, 3, 8, 9, 6, 4, 5], [7, 6, 2, 1, 5, 8, 9, 3, 0, 4], [7, 2, 4, 5, 1, 8, 0, 3, 9, 6], [1, 7, 0, 5, 4, 6, 2, 3, 8, 9], [6, 0, 5, 2, 8, 3, 4, 1, 7, 9], [5, 2, 7, 0, 1, 6, 8, 9, 3, 4], [7, 2, 8, 0, 3, 6, 1, 9, 5, 4], [7, 2, 4, 8, 0, 3, 5, 6, 9, 1], [7, 2, 0, 5, 4, 3, 1, 8, 6, 9], [5, 4, 7, 2, 1, 0, 8, 6, 3, 9], [7, 2, 1, 0, 5, 3, 4, 9, 8, 6], [2, 7, 5, 4, 3, 1, 9, 8, 6, 0], [5, 4, 0, 3, 7, 1, 2, 8, 9, 6], [7, 5, 4, 0, 2, 1, 6, 8, 9, 3], [6, 8, 2, 5, 7, 1, 0, 4, 3, 9], [4, 1, 7, 3, 0, 5, 8, 9, 6, 2], [7, 5, 0, 2, 8, 3, 1, 4, 6, 9], [4, 5, 7, 8, 0, 2, 3, 6, 1, 9], [7, 0, 1, 2, 6, 9, 8, 5, 3, 4], [2, 1, 7, 3, 0, 8, 4, 5, 9, 6], [4, 3, 7, 5, 0, 8, 9, 2, 6, 1], [5, 4, 7, 2, 8, 0, 9, 6, 3, 1], [2, 7, 1, 5, 8, 6, 4, 0, 3, 9], [0, 1, 2, 8, 3, 4, 7, 9, 6, 5], [4, 7, 1, 8, 6, 2, 3, 0, 9, 5], [5, 2, 7, 4, 8, 1, 0, 9, 3, 6], [4, 5, 9, 3, 7, 2, 6, 0, 8, 1], [7, 5, 4, 2, 3, 0, 8, 1, 6, 9], [4, 7, 5, 2, 8, 1, 3, 0, 6, 9], [7, 2, 0, 6, 9, 8, 3, 1, 5, 4], [5, 7, 2, 8, 0, 6, 9, 3, 4, 1], [7, 2, 5, 4, 3, 0, 1, 8, 9, 6], [5, 0, 4, 7, 2, 1, 8, 6, 9, 3], [1, 6, 2, 9, 3, 0, 8, 7, 5, 4], [7, 4, 0, 2, 1, 3, 6, 8, 5, 9], [9, 6, 1, 0, 8, 4, 5, 7, 3, 2], [0, 3, 7, 6, 2, 8, 9, 5, 4, 1], [7, 4, 1, 2, 5, 6, 8, 9, 0, 3], [7, 0, 4, 1, 3, 5, 2, 6, 8, 9], [7, 4, 2, 5, 0, 8, 3, 1, 9, 6], [7, 1, 0, 2, 6, 3, 8, 5, 9, 4], [7, 2, 9, 6, 0, 8, 1, 3, 4, 5], [5, 2, 7, 8, 9, 0, 3, 6, 1, 4], [4, 5, 0, 7, 9, 3, 2, 1, 8, 6], [7, 0, 1, 3, 2, 8, 9, 6, 5, 4], [0, 1, 6, 3, 2, 9, 8, 4, 5, 7], [7, 0, 4, 5, 2, 1, 8, 6, 3, 9], [0, 3, 9, 5, 2, 8, 7, 1, 4, 6], [6, 7, 4, 5, 1, 2, 8, 9, 0, 3], [2, 0, 4, 3, 1, 8, 9, 7, 5, 6], [1, 2, 7, 0, 6, 4, 8, 9, 3, 5], [1, 6, 8, 0, 2, 9, 7, 4, 5, 3], [8, 7, 1, 2, 0, 3, 5, 6, 9, 4], [5, 7, 0, 3, 1, 2, 6, 4, 8, 9], [1, 0, 6, 2, 4, 3, 5, 9, 7, 8], [0, 1, 7, 2, 6, 3, 4, 5, 8, 9], [5, 1, 7, 0, 2, 9, 8, 6, 4, 
3], [7, 3, 2, 1, 8, 0, 9, 5, 4, 6], [3, 7, 1, 6, 2, 0, 8, 5, 9, 4], [5, 7, 0, 2, 1, 6, 8, 9, 3, 4], [4, 7, 2, 8, 1, 0, 6, 3, 5, 9], [5, 7, 2, 4, 8, 0, 1, 3, 6, 9], [1, 5, 0, 4, 7, 9, 6, 8, 3, 2], [1, 7, 2, 8, 6, 3, 9, 4, 5, 0], [2, 5, 4, 1, 7, 8, 6, 3, 0, 9], [7, 2, 5, 6, 8, 9, 1, 0, 3, 4], [5, 7, 8, 0, 1, 2, 9, 3, 4, 6], [0, 1, 7, 5, 2, 6, 3, 4, 8, 9], [5, 2, 7, 0, 3, 6, 8, 9, 4, 1], [0, 4, 6, 3, 8, 2, 7, 5, 1, 9], [2, 9, 8, 6, 3, 0, 7, 1, 5, 4], [7, 2, 8, 1, 6, 0, 9, 3, 4, 5], [7, 2, 8, 0, 5, 1, 6, 3, 4, 9], [7, 5, 4, 0, 2, 1, 3, 6, 8, 9], [6, 9, 7, 2, 5, 1, 8, 3, 4, 0], [4, 7, 1, 5, 0, 2, 6, 3, 8, 9], [5, 1, 3, 0, 4, 2, 8, 7, 6, 9], [7, 2, 5, 8, 6, 0, 9, 3, 4, 1], [7, 1, 4, 0, 2, 8, 5, 3, 6, 9], [1, 7, 5, 2, 4, 8, 3, 0, 9, 6], [0, 7, 2, 5, 8, 3, 1, 6, 9, 4], [5, 2, 1, 6, 8, 9, 7, 0, 4, 3], [4, 7, 5, 0, 2, 8, 3, 9, 6, 1], [0, 6, 3, 2, 8, 9, 7, 4, 5, 1], [5, 4, 7, 8, 2, 0, 9, 1, 6, 3], [7, 0, 8, 2, 6, 9, 4, 3, 5, 1], [2, 7, 1, 0, 4, 3, 9, 8, 6, 5], [1, 2, 8, 7, 4, 5, 6, 3, 0, 9], [2, 7, 6, 0, 3, 1, 5, 8, 4, 9], [7, 1, 2, 0, 3, 5, 4, 8, 6, 9], [6, 8, 0, 7, 9, 3, 2, 1, 4, 5], [1, 4, 0, 7, 5, 2, 8, 3, 9, 6], [7, 5, 1, 0, 6, 3, 2, 4, 8, 9], [0, 7, 6, 2, 3, 1, 8, 9, 4, 5], [7, 2, 5, 8, 6, 0, 9, 1, 4, 3], [4, 7, 1, 2, 0, 5, 3, 8, 9, 6], [2, 5, 0, 7, 1, 4, 9, 3, 8, 6], [7, 4, 5, 2, 9, 6, 0, 8, 1, 3], [2, 7, 4, 5, 8, 1, 3, 0, 6, 9], [6, 0, 7, 2, 5, 9, 1, 3, 4, 8], [7, 4, 1, 5, 0, 3, 6, 9, 8, 2], [5, 2, 1, 7, 6, 8, 0, 9, 3, 4], [5, 7, 2, 4, 6, 8, 1, 3, 9, 0], [7, 2, 0, 8, 3, 1, 6, 9, 4, 5], [0, 2, 7, 8, 1, 5, 3, 4, 6, 9], [4, 0, 1, 5, 7, 2, 3, 8, 9, 6], [1, 2, 7, 0, 5, 6, 8, 3, 4, 9], [3, 7, 0, 4, 1, 5, 8, 2, 6, 9], [5, 7, 2, 1, 8, 6, 4, 0, 3, 9], [7, 2, 0, 1, 5, 9, 8, 6, 4, 3], [1, 2, 6, 3, 0, 7, 8, 9, 4, 5], [4, 5, 0, 2, 7, 8, 1, 3, 9, 6], [6, 9, 7, 8, 2, 0, 1, 3, 5, 4], [7, 2, 0, 6, 8, 9, 3, 1, 4, 5], [1, 7, 0, 3, 2, 6, 8, 4, 5, 9], [7, 4, 5, 3, 0, 2, 8, 6, 1, 9], [7, 4, 0, 5, 2, 1, 6, 9, 8, 3], [4, 7, 0, 5, 2, 3, 8, 1, 9, 6], [4, 7, 5, 8, 2, 6, 3, 9, 0, 1], [7, 1, 5, 4, 6, 2, 8, 0, 9, 3], [7, 3, 2, 0, 9, 8, 6, 1, 5, 4], [0, 7, 2, 3, 5, 8, 4, 9, 6, 1], [2, 7, 0, 9, 6, 1, 5, 4, 3, 8], [7, 5, 4, 0, 2, 3, 8, 1, 6, 9], [1, 0, 3, 9, 8, 6, 2, 7, 5, 4], [0, 2, 7, 9, 6, 3, 8, 4, 5, 1], [4, 3, 1, 5, 8, 7, 6, 2, 0, 9], [4, 7, 0, 6, 5, 8, 2, 1, 3, 9], [7, 4, 1, 8, 6, 2, 5, 9, 3, 0], [0, 7, 1, 6, 5, 3, 2, 8, 4, 9], [7, 8, 3, 2, 4, 9, 0, 5, 1, 6], [7, 4, 1, 8, 0, 6, 3, 9, 2, 5], [2, 8, 1, 3, 6, 0, 7, 9, 5, 4], [7, 1, 3, 4, 2, 8, 0, 5, 9, 6], [7, 2, 1, 8, 0, 6, 3, 9, 5, 4], [3, 7, 2, 0, 1, 8, 6, 9, 5, 4], [4, 7, 2, 5, 0, 8, 1, 3, 9, 6], [2, 7, 4, 1, 5, 8, 9, 6, 0, 3], [8, 2, 7, 5, 4, 3, 0, 1, 9, 6], [1, 7, 4, 2, 0, 3, 5, 8, 9, 6], [2, 7, 1, 8, 6, 0, 9, 4, 5, 3], [2, 1, 8, 0, 3, 5, 6, 4, 9, 7], [4, 7, 5, 2, 9, 6, 8, 3, 0, 1], [7, 2, 1, 8, 0, 4, 9, 6, 3, 5], [7, 4, 1, 3, 6, 5, 0, 2, 9, 8], [1, 8, 7, 2, 0, 4, 3, 5, 6, 9], [6, 7, 5, 2, 8, 0, 3, 9, 4, 1], [7, 3, 5, 4, 2, 1, 0, 8, 9, 6], [1, 8, 6, 0, 7, 2, 9, 3, 5, 4], [1, 0, 2, 6, 8, 7, 9, 5, 3, 4], [6, 4, 5, 1, 7, 2, 0, 8, 3, 9], [7, 8, 6, 0, 5, 1, 3, 2, 9, 4], [3, 8, 1, 0, 2, 7, 5, 4, 9, 6], [3, 5, 4, 2, 6, 7, 0, 1, 8, 9], [1, 0, 3, 6, 2, 7, 8, 9, 4, 5], [2, 3, 7, 0, 8, 9, 1, 4, 6, 5], [6, 3, 5, 0, 8, 1, 2, 9, 7, 4], [4, 5, 0, 7, 2, 8, 1, 6, 3, 9], [0, 4, 5, 8, 2, 3, 9, 1, 6, 7], [7, 1, 2, 0, 8, 4, 3, 6, 5, 9], [1, 2, 7, 0, 8, 3, 4, 6, 5, 9], [5, 4, 1, 6, 0, 2, 3, 7, 8, 9], [8, 4, 3, 6, 7, 2, 5, 1, 0, 9], [9, 7, 1, 8, 4, 5, 2, 0, 6, 3], [1, 4, 2, 3, 0, 7, 8, 9, 6, 5], [7, 1, 4, 0, 2, 8, 3, 5, 9, 6], [0, 1, 7, 2, 4, 8, 5, 3, 9, 6], [1, 5, 4, 7, 6, 2, 0, 9, 3, 
8], [3, 0, 8, 9, 7, 4, 2, 6, 1, 5], [0, 7, 5, 2, 9, 8, 3, 6, 4, 1], [1, 6, 7, 2, 0, 3, 8, 9, 5, 4], [7, 8, 2, 3, 1, 0, 6, 5, 9, 4], [3, 7, 1, 2, 0, 5, 8, 6, 4, 9], [7, 4, 2, 0, 5, 8, 6, 1, 9, 3], [4, 7, 2, 3, 0, 1, 8, 9, 6, 5], [4, 3, 8, 2, 7, 9, 0, 5, 6, 1], [0, 3, 7, 6, 2, 8, 1, 4, 9, 5], [6, 5, 1, 7, 2, 8, 0, 3, 9, 4], [1, 7, 4, 2, 8, 5, 0, 6, 9, 3], [6, 4, 0, 8, 3, 7, 1, 5, 2, 9], [1, 0, 4, 5, 7, 2, 8, 3, 9, 6], [0, 5, 9, 1, 7, 2, 4, 3, 6, 8], [0, 5, 4, 6, 7, 8, 1, 2, 9, 3], [4, 7, 2, 5, 1, 8, 3, 6, 0, 9], [5, 4, 0, 2, 7, 6, 3, 1, 8, 9], [7, 2, 6, 0, 3, 1, 8, 9, 5, 4], [0, 1, 6, 8, 2, 3, 9, 7, 5, 4], [8, 6, 0, 7, 4, 2, 5, 1, 3, 9], [2, 8, 7, 9, 6, 1, 3, 0, 5, 4], [2, 4, 3, 1, 8, 0, 5, 6, 7, 9], [7, 0, 4, 5, 1, 8, 3, 2, 6, 9], [7, 3, 1, 8, 2, 0, 6, 9, 5, 4], [7, 2, 8, 9, 0, 3, 4, 5, 1, 6], [3, 5, 6, 0, 7, 8, 2, 9, 1, 4], [5, 7, 2, 1, 9, 0, 8, 3, 4, 6], [5, 4, 3, 2, 0, 8, 7, 6, 9, 1], [5, 2, 4, 3, 7, 0, 1, 8, 6, 9], [4, 0, 9, 6, 5, 3, 1, 7, 8, 2], [3, 1, 0, 9, 6, 8, 2, 7, 5, 4], [8, 2, 6, 1, 7, 3, 0, 5, 9, 4], [7, 5, 8, 4, 2, 3, 0, 6, 1, 9], [5, 4, 7, 2, 1, 3, 0, 6, 9, 8], [7, 2, 0, 4, 1, 3, 5, 8, 6, 9], [7, 4, 5, 3, 2, 6, 8, 1, 9, 0], [4, 1, 2, 5, 7, 6, 8, 9, 0, 3], [7, 2, 4, 3, 8, 5, 1, 6, 0, 9], [2, 1, 8, 0, 7, 5, 4, 6, 9, 3], [7, 2, 5, 0, 1, 8, 3, 6, 9, 4], [2, 5, 7, 8, 1, 0, 6, 9, 4, 3], [7, 1, 6, 5, 4, 8, 3, 2, 0, 9], [0, 7, 2, 1, 6, 3, 8, 5, 9, 4], [2, 1, 8, 3, 7, 0, 5, 4, 9, 6], [4, 7, 1, 5, 3, 2, 8, 0, 9, 6], [7, 2, 0, 8, 1, 6, 5, 9, 4, 3], [2, 7, 0, 5, 3, 8, 9, 1, 6, 4], [7, 2, 3, 8, 0, 9, 1, 5, 4, 6], [7, 1, 4, 5, 2, 0, 3, 8, 9, 6], [8, 3, 2, 0, 6, 1, 7, 5, 4, 9], [5, 3, 2, 0, 7, 8, 9, 1, 6, 4], [4, 7, 1, 2, 5, 8, 0, 6, 9, 3], [5, 4, 7, 8, 0, 2, 1, 3, 6, 9], [7, 2, 5, 0, 3, 8, 1, 6, 4, 9], [7, 2, 5, 8, 6, 3, 1, 9, 0, 4], [4, 5, 7, 2, 0, 8, 3, 6, 9, 1], [1, 0, 4, 5, 7, 2, 6, 8, 3, 9], [1, 4, 2, 7, 3, 0, 5, 8, 6, 9], [3, 7, 0, 2, 8, 5, 6, 4, 9, 1], [0, 3, 2, 7, 8, 1, 6, 9, 5, 4], [1, 2, 7, 4, 8, 5, 3, 0, 6, 9], [5, 0, 6, 9, 3, 1, 4, 7, 8, 2], [7, 2, 6, 5, 8, 9, 0, 3, 1, 4], [7, 2, 4, 5, 8, 3, 1, 0, 9, 6], [5, 0, 4, 7, 2, 1, 3, 9, 8, 6], [7, 2, 1, 0, 4, 8, 6, 5, 3, 9], [4, 1, 0, 7, 3, 2, 6, 8, 5, 9], [1, 4, 0, 3, 5, 8, 6, 2, 9, 7], [7, 4, 8, 2, 0, 5, 6, 9, 1, 3], [1, 4, 0, 7, 5, 2, 3, 6, 8, 9], [7, 6, 2, 5, 0, 9, 3, 1, 4, 8], [7, 0, 5, 3, 2, 1, 8, 9, 6, 4], [0, 6, 1, 9, 8, 2, 4, 7, 3, 5], [1, 6, 3, 4, 5, 0, 7, 2, 9, 8], [6, 5, 7, 4, 1, 8, 3, 2, 0, 9], [0, 7, 1, 8, 3, 9, 2, 6, 5, 4], [2, 0, 8, 4, 3, 5, 9, 7, 6, 1], [0, 3, 7, 9, 1, 2, 6, 8, 4, 5], [5, 7, 6, 4, 3, 8, 0, 2, 9, 1], [1, 4, 6, 7, 3, 0, 9, 8, 2, 5], [0, 1, 4, 2, 3, 5, 7, 9, 8, 6], [4, 5, 0, 7, 2, 1, 6, 8, 9, 3], [7, 4, 5, 1, 0, 6, 2, 3, 8, 9], [3, 0, 2, 1, 7, 8, 9, 5, 6, 4], [5, 4, 0, 3, 7, 2, 8, 9, 6, 1], [1, 5, 4, 8, 7, 2, 6, 0, 3, 9], [1, 7, 4, 9, 5, 0, 6, 8, 3, 2], [1, 7, 2, 3, 0, 6, 8, 9, 5, 4], [7, 0, 3, 5, 2, 6, 8, 4, 1, 9], [0, 2, 7, 8, 3, 1, 6, 5, 9, 4], [7, 0, 4, 2, 1, 8, 3, 5, 9, 6], [0, 3, 5, 9, 2, 6, 8, 1, 7, 4], [1, 4, 5, 2, 3, 8, 7, 6, 0, 9], [3, 1, 8, 7, 2, 6, 0, 5, 9, 4], [7, 2, 5, 0, 3, 4, 8, 1, 9, 6], [6, 0, 3, 1, 9, 7, 2, 8, 5, 4], [7, 2, 1, 0, 3, 5, 6, 4, 8, 9], [7, 2, 4, 8, 0, 5, 3, 9, 1, 6], [7, 1, 4, 8, 6, 2, 5, 0, 3, 9], [2, 8, 3, 7, 6, 0, 9, 1, 4, 5], [7, 0, 1, 4, 2, 3, 8, 9, 5, 6], [0, 7, 4, 2, 8, 5, 1, 9, 3, 6], [7, 2, 4, 0, 1, 5, 8, 6, 3, 9], [3, 7, 4, 2, 0, 1, 6, 8, 5, 9], [0, 3, 9, 1, 8, 5, 6, 2, 4, 7], [0, 2, 7, 8, 5, 1, 3, 9, 6, 4], [1, 0, 8, 6, 3, 9, 2, 7, 4, 5], [4, 9, 7, 8, 0, 5, 3, 2, 6, 1], [7, 4, 1, 2, 8, 0, 6, 9, 5, 3], [1, 5, 0, 7, 8, 2, 6, 4, 3, 9], [1, 7, 2, 0, 3, 4, 5, 8, 9, 
6], [7, 1, 2, 8, 0, 6, 3, 5, 4, 9], [7, 1, 4, 2, 8, 9, 3, 6, 0, 5], [6, 9, 1, 0, 2, 3, 7, 4, 5, 8], [4, 5, 7, 6, 1, 2, 0, 8, 3, 9], [7, 0, 5, 1, 8, 2, 3, 6, 9, 4], [4, 5, 7, 2, 0, 3, 8, 1, 6, 9], [5, 4, 7, 2, 3, 0, 9, 6, 8, 1], [1, 7, 0, 2, 8, 3, 5, 6, 4, 9], [3, 0, 8, 5, 1, 6, 7, 4, 2, 9], [2, 3, 7, 5, 1, 8, 4, 0, 9, 6], [7, 4, 0, 2, 3, 9, 6, 8, 5, 1], [7, 4, 1, 5, 8, 3, 2, 0, 6, 9], [7, 1, 2, 4, 0, 5, 8, 3, 9, 6], [1, 7, 2, 0, 6, 3, 5, 8, 4, 9], [5, 3, 7, 8, 2, 4, 0, 9, 6, 1], [4, 5, 7, 2, 0, 1, 3, 8, 6, 9], [7, 2, 0, 3, 4, 8, 5, 1, 9, 6], [4, 7, 5, 0, 2, 1, 6, 3, 8, 9], [7, 1, 4, 2, 6, 5, 3, 8, 9, 0], [0, 1, 3, 2, 8, 9, 6, 5, 4, 7], [1, 0, 2, 8, 7, 4, 3, 6, 9, 5], [7, 8, 2, 1, 4, 3, 5, 0, 6, 9], [7, 8, 5, 0, 2, 3, 9, 1, 4, 6], [7, 4, 1, 8, 2, 5, 3, 9, 0, 6], [7, 0, 2, 6, 8, 3, 1, 9, 5, 4], [7, 4, 5, 0, 1, 2, 8, 6, 9, 3], [4, 3, 2, 7, 5, 0, 8, 9, 1, 6], [4, 5, 7, 2, 0, 3, 8, 9, 1, 6], [7, 0, 5, 1, 6, 8, 4, 2, 3, 9], [7, 5, 4, 0, 2, 8, 3, 1, 6, 9], [7, 3, 1, 5, 2, 0, 4, 8, 6, 9], [0, 3, 7, 2, 4, 1, 5, 6, 9, 8], [7, 0, 2, 3, 6, 4, 1, 5, 8, 9], [1, 7, 2, 4, 8, 6, 0, 3, 9, 5], [4, 5, 2, 3, 0, 7, 6, 8, 9, 1], [3, 5, 1, 2, 7, 6, 8, 9, 4, 0], [1, 7, 2, 0, 6, 8, 9, 3, 4, 5], [7, 4, 3, 2, 5, 1, 0, 6, 8, 9], [5, 4, 2, 1, 7, 8, 6, 9, 0, 3], [3, 9, 8, 1, 6, 7, 0, 4, 2, 5], [7, 5, 2, 4, 0, 8, 1, 6, 3, 9], [5, 7, 4, 0, 2, 6, 9, 1, 8, 3], [8, 4, 2, 7, 1, 5, 3, 0, 9, 6], [7, 2, 3, 1, 5, 0, 8, 9, 4, 6], [6, 0, 9, 8, 3, 2, 5, 7, 1, 4], [0, 5, 8, 1, 2, 4, 6, 9, 7, 3], [5, 2, 0, 8, 1, 3, 6, 7, 4, 9], [0, 1, 2, 3, 6, 9, 8, 7, 5, 4], [5, 7, 2, 0, 3, 9, 8, 6, 1, 4], [7, 3, 2, 8, 6, 0, 9, 5, 4, 1], [5, 7, 2, 8, 0, 1, 3, 4, 6, 9], [1, 7, 6, 8, 5, 0, 3, 2, 4, 9], [3, 7, 5, 4, 0, 2, 8, 9, 1, 6], [7, 1, 5, 8, 6, 0, 2, 3, 9, 4], [3, 7, 0, 1, 8, 2, 6, 9, 4, 5], [5, 2, 7, 3, 1, 0, 8, 4, 6, 9], [4, 5, 7, 1, 8, 2, 3, 0, 9, 6], [1, 4, 8, 7, 0, 3, 5, 6, 2, 9], [1, 4, 5, 7, 2, 6, 8, 9, 3, 0], [7, 5, 4, 2, 1, 0, 3, 8, 6, 9], [4, 5, 2, 7, 8, 1, 0, 3, 6, 9], [0, 4, 1, 7, 5, 6, 9, 2, 8, 3], [1, 7, 5, 4, 0, 6, 2, 3, 8, 9], [7, 8, 2, 3, 4, 0, 5, 6, 9, 1], [4, 8, 5, 7, 2, 3, 0, 9, 1, 6], [0, 7, 2, 5, 1, 8, 3, 6, 9, 4], [4, 1, 7, 0, 3, 5, 9, 8, 6, 2], [4, 5, 1, 0, 6, 7, 9, 8, 2, 3], [1, 2, 3, 8, 0, 7, 9, 6, 5, 4], [7, 2, 8, 1, 0, 3, 9, 6, 5, 4], [4, 7, 5, 0, 3, 6, 1, 9, 2, 8], [1, 7, 6, 8, 3, 2, 0, 4, 9, 5], [0, 6, 4, 7, 5, 3, 9, 2, 8, 1], [2, 1, 0, 7, 8, 6, 3, 9, 5, 4], [7, 1, 4, 6, 8, 0, 2, 9, 5, 3], [1, 7, 0, 8, 2, 5, 6, 9, 3, 4], [2, 7, 8, 5, 1, 6, 3, 0, 9, 4], [7, 4, 5, 0, 8, 1, 2, 3, 9, 6], [5, 7, 0, 2, 3, 1, 6, 4, 8, 9], [5, 4, 6, 7, 1, 2, 0, 8, 9, 3], [4, 5, 7, 0, 2, 3, 1, 8, 6, 9], [1, 2, 5, 8, 9, 7, 3, 0, 6, 4], [1, 3, 5, 0, 2, 7, 9, 8, 6, 4], [1, 0, 6, 3, 7, 2, 8, 9, 5, 4], [2, 4, 8, 0, 1, 3, 5, 7, 6, 9], [5, 7, 0, 4, 1, 2, 8, 6, 3, 9], [7, 4, 2, 8, 1, 5, 6, 9, 3, 0], [4, 7, 5, 8, 2, 1, 9, 6, 3, 0], [7, 0, 3, 1, 8, 2, 6, 9, 5, 4], [7, 4, 3, 1, 2, 0, 8, 6, 9, 5], [8, 2, 3, 7, 0, 6, 4, 1, 9, 5], [7, 4, 5, 2, 0, 1, 3, 6, 8, 9], [4, 0, 7, 2, 3, 8, 9, 1, 5, 6], [2, 8, 7, 3, 6, 1, 0, 4, 9, 5], [5, 7, 8, 0, 3, 2, 9, 6, 1, 4], [7, 4, 2, 0, 8, 3, 5, 1, 9, 6], [0, 1, 6, 5, 9, 2, 7, 8, 4, 3], [9, 0, 3, 6, 8, 2, 7, 1, 5, 4], [7, 2, 1, 6, 0, 8, 9, 3, 5, 4], [1, 6, 9, 3, 0, 8, 5, 2, 7, 4], [5, 7, 1, 4, 2, 6, 0, 3, 8, 9], [1, 2, 7, 0, 4, 5, 3, 6, 8, 9], [4, 2, 3, 7, 8, 9, 1, 0, 6, 5], [5, 7, 8, 0, 6, 2, 3, 9, 1, 4], [6, 0, 2, 3, 4, 5, 7, 8, 1, 9], [5, 4, 7, 2, 0, 3, 1, 8, 6, 9], [7, 2, 3, 5, 1, 0, 6, 8, 9, 4], [4, 1, 5, 7, 3, 0, 8, 2, 6, 9], [0, 4, 1, 5, 7, 2, 8, 3, 6, 9], [5, 7, 2, 8, 3, 0, 9, 6, 4, 1], [5, 1, 7, 6, 2, 8, 3, 9, 4, 
0], [7, 5, 8, 4, 2, 0, 3, 1, 9, 6], [7, 4, 5, 1, 2, 3, 0, 8, 6, 9], [4, 5, 6, 1, 7, 0, 2, 8, 3, 9], [7, 2, 3, 1, 5, 0, 8, 4, 9, 6], [2, 4, 1, 7, 8, 5, 3, 0, 9, 6], [4, 5, 7, 0, 3, 1, 2, 6, 8, 9], [7, 0, 1, 5, 3, 4, 6, 8, 2, 9], [4, 2, 5, 8, 3, 9, 7, 6, 1, 0], [7, 4, 0, 1, 9, 5, 3, 8, 6, 2], [4, 1, 0, 3, 7, 6, 8, 5, 9, 2], [2, 7, 5, 4, 0, 8, 3, 1, 6, 9], [7, 0, 2, 6, 5, 3, 8, 9, 1, 4], [0, 2, 1, 6, 7, 8, 5, 3, 9, 4], [7, 5, 0, 2, 8, 3, 9, 6, 1, 4], [2, 6, 1, 0, 7, 8, 4, 9, 3, 5], [2, 3, 7, 5, 8, 9, 0, 6, 1, 4], [5, 1, 7, 2, 4, 8, 3, 0, 6, 9], [5, 6, 1, 2, 0, 3, 8, 7, 9, 4], [7, 2, 4, 0, 5, 1, 8, 6, 9, 3], [7, 3, 4, 2, 0, 8, 1, 5, 6, 9], [4, 1, 3, 7, 0, 6, 8, 5, 2, 9], [0, 2, 6, 3, 9, 8, 4, 7, 1, 5], [4, 5, 8, 0, 7, 2, 3, 1, 6, 9], [7, 3, 1, 2, 5, 0, 8, 6, 9, 4], [0, 4, 5, 7, 3, 1, 6, 2, 8, 9], [0, 4, 2, 3, 9, 7, 1, 8, 5, 6], [0, 8, 7, 6, 2, 9, 3, 1, 5, 4], [7, 4, 2, 8, 0, 3, 5, 1, 9, 6], [7, 0, 4, 3, 5, 2, 8, 6, 1, 9], [7, 0, 1, 5, 3, 4, 8, 9, 2, 6], [7, 2, 1, 0, 8, 3, 6, 9, 5, 4], [7, 4, 1, 6, 5, 0, 2, 8, 9, 3], [5, 1, 7, 0, 3, 6, 2, 8, 9, 4], [5, 7, 2, 8, 3, 0, 6, 4, 9, 1], [7, 1, 2, 4, 3, 6, 0, 8, 9, 5], [5, 7, 2, 8, 3, 1, 4, 0, 6, 9], [2, 5, 1, 0, 8, 6, 3, 4, 7, 9], [1, 5, 3, 4, 6, 0, 7, 2, 8, 9], [0, 1, 7, 2, 5, 8, 9, 4, 3, 6], [7, 2, 3, 4, 8, 6, 1, 0, 9, 5], [7, 4, 1, 0, 2, 6, 3, 5, 8, 9], [5, 7, 4, 1, 0, 2, 9, 8, 3, 6], [7, 8, 2, 3, 0, 9, 1, 6, 5, 4], [7, 5, 2, 4, 8, 0, 1, 3, 6, 9], [7, 1, 5, 2, 0, 8, 6, 3, 9, 4], [7, 2, 3, 1, 0, 6, 5, 8, 9, 4], [2, 7, 8, 4, 0, 1, 5, 3, 6, 9], [7, 3, 0, 5, 8, 1, 2, 9, 6, 4], [7, 1, 0, 2, 5, 4, 8, 3, 6, 9], [3, 6, 5, 1, 8, 4, 7, 2, 0, 9], [4, 5, 2, 7, 3, 1, 0, 8, 9, 6], [2, 7, 8, 5, 6, 3, 1, 9, 4, 0], [7, 2, 4, 8, 1, 0, 6, 9, 5, 3], [5, 4, 8, 7, 9, 6, 1, 2, 0, 3], [6, 7, 2, 0, 1, 9, 8, 3, 4, 5], [7, 4, 5, 0, 1, 3, 2, 6, 8, 9], [7, 4, 5, 1, 2, 6, 0, 8, 3, 9], [2, 1, 5, 3, 0, 8, 9, 4, 6, 7], [3, 1, 2, 5, 7, 4, 6, 0, 8, 9], [5, 4, 8, 9, 3, 0, 1, 2, 6, 7], [4, 0, 6, 5, 3, 1, 7, 2, 8, 9], [2, 3, 5, 8, 9, 6, 1, 7, 0, 4], [1, 7, 2, 8, 4, 0, 9, 3, 6, 5], [4, 1, 7, 2, 5, 8, 3, 6, 0, 9], [4, 7, 2, 3, 1, 5, 8, 0, 6, 9], [5, 1, 4, 7, 8, 2, 0, 3, 9, 6], [3, 7, 0, 8, 2, 9, 5, 6, 4, 1], [0, 3, 7, 2, 1, 4, 5, 8, 9, 6], [7, 4, 5, 2, 9, 6, 3, 0, 8, 1], [6, 1, 9, 3, 0, 7, 2, 8, 4, 5], [7, 5, 0, 1, 3, 4, 6, 8, 9, 2], [4, 7, 5, 1, 0, 6, 2, 9, 8, 3], [1, 4, 7, 3, 5, 2, 0, 8, 6, 9], [4, 7, 1, 0, 3, 6, 8, 9, 2, 5], [2, 7, 8, 1, 0, 3, 6, 9, 5, 4], [5, 2, 7, 1, 8, 0, 4, 3, 6, 9], [0, 1, 7, 6, 2, 3, 9, 8, 5, 4], [7, 3, 2, 0, 6, 1, 8, 5, 4, 9], [7, 0, 4, 5, 3, 8, 1, 2, 6, 9], [2, 8, 1, 7, 0, 6, 3, 9, 5, 4], [7, 5, 6, 4, 1, 2, 0, 3, 8, 9], [3, 7, 5, 4, 2, 0, 8, 1, 6, 9], [5, 7, 2, 8, 9, 6, 0, 1, 3, 4], [4, 7, 2, 1, 3, 6, 0, 8, 5, 9], [1, 7, 6, 8, 2, 4, 0, 9, 3, 5], [1, 4, 7, 5, 0, 2, 3, 8, 6, 9], [6, 7, 1, 2, 0, 3, 5, 8, 4, 9], [7, 0, 4, 3, 2, 5, 8, 9, 6, 1], [5, 7, 1, 4, 2, 8, 0, 3, 9, 6], [4, 5, 7, 0, 1, 2, 6, 3, 8, 9], [5, 4, 1, 7, 2, 0, 6, 8, 3, 9], [4, 7, 1, 3, 2, 8, 5, 0, 9, 6], [4, 7, 2, 0, 1, 3, 6, 9, 8, 5], [6, 0, 7, 3, 2, 9, 1, 8, 4, 5], [7, 2, 1, 8, 0, 3, 9, 6, 4, 5], [7, 2, 4, 8, 5, 1, 3, 0, 9, 6], [4, 0, 7, 5, 1, 2, 8, 6, 3, 9], [4, 0, 7, 5, 2, 8, 3, 1, 6, 9], [0, 2, 4, 8, 5, 7, 6, 9, 1, 3], [7, 6, 1, 4, 2, 5, 0, 8, 3, 9], [4, 5, 0, 7, 2, 1, 8, 3, 9, 6], [0, 5, 2, 8, 9, 3, 1, 6, 7, 4], [7, 2, 8, 1, 4, 5, 0, 3, 9, 6], [1, 2, 7, 5, 4, 0, 3, 8, 6, 9], [4, 0, 5, 7, 3, 6, 2, 8, 9, 1], [5, 7, 2, 8, 9, 6, 3, 1, 0, 4], [0, 6, 9, 3, 8, 2, 7, 1, 5, 4], [5, 0, 6, 9, 1, 2, 7, 8, 4, 3], [1, 0, 5, 3, 6, 2, 4, 7, 8, 9], [7, 2, 0, 5, 4, 1, 6, 3, 8, 9], [7, 5, 4, 3, 1, 0, 2, 6, 8, 
9], [2, 8, 7, 0, 6, 1, 9, 3, 5, 4], [7, 2, 8, 1, 4, 0, 6, 3, 9, 5], [7, 5, 8, 3, 4, 0, 9, 2, 1, 6], [7, 2, 4, 5, 3, 1, 8, 0, 6, 9], [8, 5, 3, 7, 2, 0, 4, 1, 9, 6], [7, 2, 9, 6, 5, 3, 1, 8, 0, 4], [2, 7, 1, 8, 6, 3, 0, 5, 4, 9], [7, 4, 1, 5, 3, 2, 0, 8, 9, 6], [4, 5, 2, 8, 7, 3, 1, 0, 9, 6], [3, 6, 5, 9, 0, 2, 7, 8, 4, 1], [1, 7, 2, 0, 8, 3, 5, 4, 6, 9], [7, 2, 9, 6, 8, 3, 0, 5, 4, 1], [7, 2, 1, 8, 6, 9, 5, 3, 0, 4], [4, 7, 5, 1, 8, 6, 0, 3, 9, 2], [7, 1, 6, 2, 3, 8, 9, 5, 0, 4], [2, 1, 4, 7, 5, 0, 3, 9, 8, 6], [7, 5, 4, 8, 9, 3, 1, 0, 6, 2], [1, 4, 5, 8, 6, 2, 7, 0, 3, 9], [7, 6, 2, 5, 3, 0, 4, 1, 8, 9], [6, 0, 7, 9, 1, 2, 8, 3, 4, 5], [7, 3, 0, 4, 5, 1, 8, 2, 9, 6], [4, 1, 3, 0, 2, 7, 8, 5, 9, 6], [1, 6, 8, 2, 0, 3, 9, 5, 4, 7], [2, 1, 0, 3, 6, 7, 4, 5, 8, 9], [0, 9, 3, 6, 2, 7, 8, 5, 4, 1], [2, 5, 8, 7, 1, 0, 4, 6, 9, 3], [7, 2, 4, 0, 1, 3, 5, 8, 9, 6], [5, 7, 4, 1, 0, 2, 3, 8, 9, 6], [7, 5, 4, 2, 1, 8, 6, 3, 0, 9], [5, 7, 1, 8, 2, 0, 9, 6, 3, 4], [7, 1, 4, 6, 0, 2, 5, 3, 8, 9], [7, 2, 5, 6, 3, 8, 1, 9, 0, 4], [4, 2, 7, 6, 3, 8, 0, 9, 1, 5], [1, 6, 3, 7, 2, 4, 8, 5, 0, 9], [2, 8, 1, 3, 5, 4, 7, 6, 9, 0], [4, 7, 5, 8, 1, 2, 3, 9, 0, 6], [8, 1, 9, 0, 2, 3, 7, 6, 5, 4], [7, 4, 2, 5, 8, 1, 6, 9, 0, 3], [3, 5, 7, 2, 8, 6, 1, 0, 9, 4], [1, 7, 2, 0, 3, 8, 4, 9, 5, 6], [1, 7, 2, 0, 8, 3, 6, 9, 5, 4], [5, 7, 2, 1, 8, 9, 3, 0, 4, 6], [7, 2, 8, 1, 0, 3, 9, 5, 4, 6], [7, 2, 8, 5, 0, 1, 3, 6, 9, 4], [4, 5, 7, 3, 0, 1, 6, 8, 9, 2], [7, 5, 4, 8, 2, 0, 3, 1, 6, 9], [7, 4, 1, 5, 2, 3, 8, 0, 9, 6], [1, 0, 9, 7, 3, 2, 8, 4, 6, 5], [4, 5, 0, 8, 7, 2, 9, 3, 6, 1], [5, 9, 2, 7, 1, 6, 8, 3, 4, 0], [2, 8, 1, 5, 7, 6, 4, 9, 3, 0], [1, 7, 5, 4, 0, 3, 6, 2, 8, 9], [5, 4, 7, 0, 2, 6, 3, 9, 8, 1], [4, 7, 6, 3, 1, 9, 2, 0, 8, 5], [7, 0, 8, 4, 5, 3, 2, 1, 6, 9], [4, 2, 5, 7, 1, 8, 0, 6, 3, 9], [4, 1, 7, 6, 8, 2, 3, 0, 9, 5], [4, 1, 5, 7, 2, 6, 0, 3, 8, 9], [4, 7, 1, 2, 8, 5, 6, 3, 0, 9], [0, 6, 1, 3, 7, 2, 8, 9, 4, 5], [4, 7, 5, 0, 1, 2, 6, 8, 3, 9], [7, 8, 2, 1, 3, 9, 0, 6, 4, 5], [7, 4, 1, 5, 3, 0, 9, 2, 8, 6], [7, 2, 8, 6, 0, 4, 1, 5, 3, 9], [3, 0, 7, 5, 8, 2, 6, 9, 1, 4], [5, 7, 2, 6, 0, 1, 3, 4, 9, 8], [3, 8, 2, 7, 5, 1, 0, 9, 6, 4], [7, 8, 2, 0, 9, 1, 6, 4, 5, 3], [7, 0, 2, 3, 8, 6, 4, 1, 9, 5], [7, 5, 1, 4, 8, 2, 6, 0, 3, 9], [2, 1, 4, 5, 7, 0, 6, 3, 8, 9], [6, 1, 3, 0, 5, 2, 7, 4, 8, 9], [1, 3, 6, 7, 2, 8, 4, 0, 9, 5], [7, 2, 6, 0, 8, 3, 1, 9, 5, 4], [4, 0, 1, 3, 7, 6, 2, 9, 8, 5], [1, 7, 6, 2, 0, 5, 9, 8, 4, 3], [7, 5, 4, 1, 8, 0, 3, 2, 9, 6], [4, 7, 1, 0, 3, 5, 8, 6, 2, 9], [7, 5, 3, 2, 6, 1, 8, 0, 9, 4], [7, 1, 8, 5, 2, 0, 3, 4, 6, 9], [7, 2, 8, 4, 1, 3, 5, 0, 9, 6], [0, 6, 3, 7, 2, 8, 4, 9, 1, 5], [4, 0, 5, 1, 7, 3, 2, 8, 9, 6], [7, 1, 0, 2, 6, 8, 4, 5, 3, 9], [2, 7, 3, 0, 8, 6, 5, 1, 4, 9], [5, 7, 3, 0, 8, 6, 2, 9, 1, 4], [7, 2, 5, 1, 6, 4, 0, 3, 8, 9], [2, 6, 8, 1, 7, 3, 0, 9, 4, 5], [7, 2, 0, 3, 6, 8, 4, 1, 5, 9], [5, 8, 7, 2, 3, 4, 0, 9, 6, 1], [2, 7, 8, 1, 0, 4, 5, 9, 3, 6], [1, 0, 3, 4, 9, 6, 7, 2, 8, 5], [0, 5, 3, 9, 1, 8, 7, 2, 4, 6], [6, 3, 1, 7, 4, 8, 2, 0, 5, 9], [7, 2, 4, 8, 5, 9, 6, 3, 0, 1], [2, 7, 3, 8, 9, 6, 1, 0, 5, 4], [0, 2, 5, 3, 6, 8, 1, 7, 4, 9], [7, 1, 2, 6, 3, 8, 0, 9, 4, 5], [5, 0, 2, 4, 3, 8, 1, 7, 6, 9], [5, 0, 2, 3, 7, 8, 6, 9, 1, 4], [1, 0, 6, 7, 5, 3, 8, 4, 9, 2], [3, 0, 7, 2, 9, 4, 5, 1, 6, 8], [2, 7, 3, 4, 8, 5, 6, 0, 1, 9], [4, 5, 7, 8, 1, 0, 2, 3, 9, 6], [6, 7, 2, 3, 0, 5, 1, 4, 8, 9], [7, 5, 4, 2, 6, 1, 0, 3, 9, 8], [7, 2, 3, 5, 4, 1, 0, 6, 8, 9], [7, 5, 1, 0, 2, 8, 4, 6, 9, 3], [4, 5, 7, 3, 1, 8, 9, 2, 6, 0], [7, 2, 5, 4, 0, 8, 6, 3, 9, 1], [7, 4, 8, 2, 5, 0, 3, 9, 6, 
1], [2, 0, 7, 4, 6, 3, 8, 1, 5, 9], [7, 2, 1, 4, 0, 3, 6, 5, 8, 9], [4, 3, 5, 7, 8, 6, 1, 2, 0, 9], [2, 9, 7, 0, 8, 5, 4, 1, 3, 6], [4, 2, 5, 7, 0, 3, 9, 8, 6, 1], [0, 5, 4, 7, 1, 6, 8, 2, 9, 3], [0, 1, 7, 3, 5, 2, 4, 8, 6, 9], [4, 6, 0, 2, 5, 7, 8, 9, 3, 1], [7, 2, 1, 8, 3, 6, 0, 5, 4, 9], [1, 0, 3, 6, 8, 2, 9, 7, 5, 4], [0, 2, 5, 3, 8, 9, 7, 6, 1, 4], [0, 4, 2, 1, 7, 5, 8, 3, 6, 9], [2, 8, 1, 9, 6, 7, 3, 0, 5, 4], [1, 0, 5, 7, 2, 8, 4, 6, 9, 3], [4, 7, 2, 0, 3, 8, 1, 6, 9, 5], [3, 9, 0, 6, 4, 5, 1, 8, 2, 7], [2, 7, 1, 3, 8, 0, 6, 9, 5, 4], [7, 6, 2, 0, 4, 3, 5, 1, 8, 9], [1, 6, 2, 9, 3, 8, 7, 4, 0, 5], [7, 8, 2, 0, 4, 5, 3, 1, 6, 9], [6, 0, 7, 1, 2, 3, 9, 5, 8, 4], [7, 0, 1, 4, 6, 2, 5, 8, 3, 9], [7, 5, 2, 4, 0, 8, 3, 1, 9, 6], [4, 7, 0, 1, 3, 2, 5, 8, 9, 6], [5, 7, 1, 4, 2, 3, 0, 8, 9, 6], [4, 7, 2, 5, 0, 1, 8, 9, 3, 6], [6, 2, 7, 0, 8, 9, 5, 1, 4, 3], [3, 0, 7, 5, 2, 8, 4, 9, 1, 6], [4, 7, 5, 1, 3, 2, 0, 8, 6, 9], [5, 7, 4, 2, 0, 3, 8, 9, 1, 6], [1, 6, 7, 2, 5, 4, 8, 0, 3, 9], [7, 1, 8, 6, 2, 5, 9, 0, 3, 4], [4, 5, 7, 1, 2, 0, 8, 3, 6, 9], [2, 5, 0, 3, 1, 8, 9, 4, 6, 7], [7, 5, 0, 3, 8, 9, 2, 1, 6, 4], [7, 4, 0, 2, 5, 8, 1, 3, 6, 9], [6, 0, 3, 1, 7, 9, 2, 8, 4, 5], [7, 0, 1, 5, 6, 3, 4, 2, 8, 9], [6, 5, 7, 3, 2, 0, 8, 9, 4, 1], [5, 8, 7, 4, 2, 3, 0, 1, 6, 9], [7, 4, 5, 2, 8, 1, 0, 9, 6, 3], [2, 7, 0, 1, 3, 5, 6, 8, 9, 4], [7, 1, 2, 3, 0, 8, 6, 9, 5, 4], [7, 2, 0, 8, 5, 4, 9, 1, 3, 6], [5, 0, 6, 2, 3, 7, 8, 9, 1, 4], [0, 4, 3, 2, 6, 7, 9, 8, 5, 1], [1, 3, 5, 0, 7, 2, 8, 4, 6, 9], [7, 5, 4, 6, 3, 0, 9, 1, 8, 2], [0, 4, 7, 2, 5, 9, 8, 1, 3, 6], [2, 7, 8, 5, 4, 0, 3, 6, 9, 1], [4, 7, 5, 1, 6, 2, 8, 0, 3, 9], [7, 2, 8, 9, 4, 0, 3, 5, 6, 1], [6, 7, 5, 2, 8, 9, 0, 1, 4, 3], [7, 5, 4, 6, 2, 8, 0, 1, 3, 9], [4, 5, 1, 7, 8, 2, 3, 0, 9, 6], [2, 5, 0, 3, 7, 8, 4, 6, 9, 1], [7, 0, 8, 2, 1, 5, 3, 9, 6, 4], [7, 2, 5, 8, 3, 0, 6, 4, 9, 1], [7, 4, 2, 8, 1, 0, 3, 6, 5, 9], [0, 4, 7, 5, 2, 1, 3, 8, 9, 6], [5, 2, 3, 7, 4, 0, 8, 1, 6, 9], [8, 1, 5, 0, 6, 3, 4, 9, 7, 2], [1, 6, 0, 2, 8, 9, 7, 5, 3, 4], [7, 4, 2, 5, 3, 0, 8, 1, 6, 9], [7, 5, 4, 0, 3, 1, 8, 2, 9, 6], [4, 7, 6, 8, 0, 5, 1, 3, 9, 2], [0, 4, 2, 1, 5, 3, 6, 8, 9, 7], [7, 4, 0, 9, 6, 3, 1, 2, 8, 5], [7, 2, 0, 3, 4, 5, 6, 1, 8, 9], [6, 2, 0, 8, 7, 9, 1, 3, 5, 4], [7, 2, 8, 1, 6, 0, 5, 9, 3, 4], [5, 6, 4, 7, 2, 8, 1, 9, 0, 3], [7, 1, 6, 8, 3, 4, 5, 9, 0, 2], [4, 7, 5, 3, 1, 2, 8, 9, 6, 0], [7, 5, 8, 2, 6, 9, 0, 3, 4, 1], [7, 5, 4, 2, 8, 6, 0, 1, 3, 9], [7, 4, 2, 0, 8, 6, 3, 5, 1, 9], [7, 0, 6, 3, 9, 5, 8, 2, 1, 4], [7, 4, 2, 8, 1, 5, 3, 9, 6, 0], [3, 5, 2, 8, 7, 1, 0, 6, 4, 9], [4, 7, 0, 8, 3, 5, 6, 2, 1, 9], [6, 0, 3, 1, 2, 7, 8, 5, 9, 4], [5, 4, 7, 3, 8, 9, 0, 1, 2, 6], [5, 1, 9, 4, 3, 6, 7, 2, 8, 0], [7, 3, 8, 2, 0, 9, 5, 6, 4, 1], [7, 3, 2, 0, 8, 9, 1, 6, 4, 5], [7, 2, 1, 0, 6, 8, 3, 9, 4, 5], [7, 4, 2, 8, 3, 5, 1, 6, 0, 9], [6, 0, 1, 5, 9, 3, 8, 2, 7, 4], [0, 5, 6, 2, 3, 8, 1, 7, 4, 9], [1, 0, 3, 9, 6, 7, 2, 8, 5, 4], [8, 3, 2, 7, 5, 0, 1, 4, 6, 9], [7, 0, 2, 3, 4, 8, 6, 1, 5, 9], [7, 4, 3, 5, 0, 8, 9, 1, 2, 6], [4, 5, 0, 1, 3, 7, 6, 8, 9, 2], [6, 2, 7, 3, 0, 9, 4, 1, 5, 8], [5, 0, 4, 1, 6, 3, 7, 2, 9, 8], [7, 2, 3, 5, 8, 1, 0, 6, 9, 4], [3, 6, 0, 7, 5, 1, 9, 2, 4, 8], [4, 5, 7, 8, 1, 0, 2, 6, 9, 3], [1, 4, 7, 5, 2, 0, 8, 3, 9, 6], [5, 2, 0, 8, 3, 9, 1, 7, 4, 6], [4, 1, 8, 7, 5, 2, 6, 0, 9, 3], [1, 7, 8, 6, 2, 5, 0, 3, 9, 4], [7, 5, 1, 0, 3, 4, 6, 2, 9, 8], [4, 5, 7, 1, 8, 9, 6, 0, 2, 3], [4, 7, 2, 5, 8, 0, 1, 9, 6, 3], [5, 7, 2, 4, 1, 0, 3, 6, 8, 9], [7, 5, 8, 2, 0, 6, 1, 4, 9, 3], [0, 2, 1, 5, 8, 6, 4, 7, 3, 9], [7, 2, 8, 6, 5, 1, 9, 3, 0, 
4], [7, 4, 3, 0, 2, 8, 1, 6, 5, 9], [8, 2, 3, 5, 7, 0, 1, 4, 9, 6], [5, 7, 4, 0, 8, 1, 2, 9, 3, 6], [3, 2, 7, 6, 0, 5, 9, 8, 1, 4], [2, 7, 3, 1, 6, 5, 0, 8, 9, 4], [3, 0, 7, 2, 8, 4, 6, 9, 1, 5], [5, 1, 2, 7, 3, 8, 9, 6, 4, 0], [8, 4, 2, 3, 0, 7, 5, 6, 9, 1], [5, 3, 0, 4, 7, 1, 8, 2, 9, 6], [0, 7, 2, 1, 4, 3, 8, 6, 9, 5], [2, 5, 7, 8, 9, 6, 0, 3, 1, 4], [7, 5, 1, 8, 2, 0, 9, 6, 3, 4], [7, 2, 8, 6, 3, 9, 5, 1, 0, 4], [2, 7, 8, 4, 3, 0, 9, 5, 1, 6], [4, 2, 7, 0, 3, 8, 6, 9, 5, 1], [4, 0, 2, 5, 7, 1, 3, 9, 8, 6], [7, 2, 0, 4, 8, 3, 6, 1, 9, 5], [7, 5, 4, 3, 0, 2, 8, 1, 9, 6], [1, 3, 9, 4, 0, 5, 8, 2, 7, 6], [5, 4, 2, 1, 0, 7, 9, 3, 6, 8], [5, 0, 8, 7, 2, 9, 3, 1, 6, 4], [0, 1, 3, 7, 4, 5, 9, 8, 2, 6], [5, 8, 0, 2, 3, 9, 7, 6, 4, 1], [6, 2, 8, 0, 1, 9, 7, 4, 3, 5], [0, 1, 3, 4, 2, 9, 8, 6, 5, 7], [4, 5, 2, 1, 6, 3, 0, 7, 8, 9], [3, 5, 4, 2, 0, 7, 8, 6, 1, 9], [1, 4, 5, 9, 0, 6, 8, 7, 3, 2], [6, 0, 9, 1, 3, 5, 4, 7, 2, 8], [2, 1, 6, 8, 5, 4, 7, 0, 3, 9], [4, 5, 7, 9, 1, 0, 2, 8, 6, 3], [1, 0, 6, 3, 7, 9, 2, 8, 4, 5], [7, 8, 2, 3, 5, 4, 0, 1, 9, 6], [7, 3, 0, 2, 8, 6, 1, 5, 9, 4], [0, 5, 7, 3, 2, 8, 1, 9, 6, 4], [7, 4, 5, 8, 0, 3, 2, 1, 6, 9], [0, 7, 5, 3, 4, 1, 6, 2, 8, 9], [4, 1, 2, 7, 8, 9, 3, 5, 6, 0], [7, 5, 2, 6, 8, 3, 1, 0, 9, 4], [4, 1, 6, 8, 0, 5, 2, 3, 7, 9], [5, 2, 3, 0, 1, 4, 7, 8, 6, 9], [7, 5, 0, 4, 6, 2, 8, 9, 3, 1], [5, 1, 7, 2, 6, 4, 8, 3, 9, 0], [7, 4, 1, 0, 2, 8, 6, 3, 5, 9], [6, 7, 2, 3, 0, 8, 1, 9, 4, 5], [4, 7, 8, 2, 5, 9, 1, 6, 0, 3], [0, 5, 3, 1, 2, 4, 7, 8, 6, 9], [7, 4, 5, 0, 3, 8, 2, 1, 6, 9], [1, 4, 7, 5, 3, 6, 9, 8, 2, 0], [7, 4, 5, 8, 3, 2, 9, 1, 0, 6], [7, 5, 2, 4, 1, 0, 3, 8, 9, 6], [1, 7, 8, 2, 4, 5, 6, 3, 9, 0], [2, 5, 7, 6, 1, 8, 0, 9, 3, 4], [7, 2, 0, 6, 3, 8, 5, 9, 1, 4], [1, 3, 2, 8, 6, 7, 0, 9, 4, 5], [1, 7, 6, 0, 5, 4, 3, 2, 8, 9], [2, 5, 6, 0, 9, 3, 7, 8, 1, 4], [7, 1, 5, 8, 2, 0, 3, 6, 9, 4], [5, 4, 7, 0, 2, 1, 3, 6, 8, 9], [7, 2, 8, 6, 3, 5, 0, 9, 1, 4], [2, 8, 0, 7, 5, 1, 9, 3, 4, 6], [2, 5, 4, 8, 3, 0, 1, 7, 9, 6], [1, 5, 0, 4, 7, 8, 2, 3, 6, 9], [0, 8, 5, 7, 1, 4, 2, 9, 6, 3], [5, 0, 7, 2, 6, 9, 3, 8, 4, 1], [7, 5, 8, 6, 4, 1, 2, 0, 3, 9], [6, 2, 1, 9, 5, 8, 7, 3, 4, 0], [4, 7, 0, 2, 8, 3, 5, 1, 6, 9], [7, 1, 8, 2, 3, 6, 0, 4, 5, 9], [6, 3, 9, 0, 7, 4, 8, 1, 2, 5], [4, 7, 1, 8, 2, 6, 0, 5, 3, 9], [7, 5, 1, 2, 8, 0, 6, 3, 9, 4], [1, 7, 2, 4, 8, 6, 0, 5, 3, 9], [2, 7, 1, 0, 6, 8, 3, 9, 4, 5], [1, 7, 0, 6, 2, 5, 4, 8, 3, 9], [5, 7, 1, 2, 8, 4, 0, 3, 6, 9], [1, 8, 7, 2, 4, 5, 0, 9, 6, 3], [1, 7, 4, 5, 0, 8, 3, 2, 9, 6], [7, 4, 1, 0, 5, 3, 6, 9, 2, 8], [3, 2, 8, 1, 7, 6, 5, 4, 0, 9], [7, 3, 1, 4, 2, 0, 5, 8, 6, 9], [7, 4, 5, 1, 2, 6, 0, 8, 3, 9], [5, 1, 0, 3, 2, 9, 8, 6, 7, 4], [4, 3, 0, 2, 8, 7, 1, 6, 5, 9], [1, 4, 6, 0, 3, 2, 7, 8, 9, 5], [7, 5, 4, 1, 2, 0, 3, 9, 6, 8], [0, 4, 7, 5, 6, 1, 8, 2, 3, 9], [7, 3, 8, 5, 2, 0, 6, 1, 9, 4], [1, 7, 2, 3, 0, 8, 6, 9, 4, 5], [0, 1, 8, 9, 7, 6, 5, 4, 3, 2], [0, 3, 6, 2, 7, 8, 9, 4, 5, 1], [6, 1, 7, 9, 0, 8, 2, 3, 4, 5], [8, 7, 2, 9, 0, 3, 5, 6, 1, 4], [5, 7, 0, 2, 1, 6, 3, 4, 8, 9], [3, 2, 0, 1, 6, 8, 9, 4, 7, 5], [5, 8, 3, 7, 2, 6, 1, 0, 9, 4], [5, 4, 7, 2, 8, 3, 0, 6, 9, 1], [5, 0, 1, 7, 4, 2, 6, 8, 9, 3], [0, 9, 2, 7, 5, 8, 3, 6, 4, 1], [7, 5, 4, 8, 0, 1, 3, 6, 9, 2], [2, 1, 7, 8, 5, 9, 3, 6, 0, 4], [2, 3, 8, 6, 7, 5, 0, 9, 4, 1], [7, 5, 4, 1, 0, 2, 3, 6, 8, 9], [4, 0, 1, 3, 7, 8, 9, 5, 2, 6], [7, 2, 5, 8, 1, 3, 0, 4, 6, 9], [0, 7, 8, 3, 5, 9, 6, 2, 1, 4], [7, 1, 4, 5, 3, 0, 8, 2, 6, 9], [7, 5, 0, 6, 2, 3, 8, 9, 1, 4], [4, 1, 3, 7, 6, 0, 5, 9, 2, 8], [6, 5, 1, 0, 3, 8, 7, 2, 4, 9], [4, 7, 1, 5, 6, 8, 2, 9, 3, 
0], [5, 7, 8, 1, 4, 0, 3, 2, 9, 6], [5, 4, 1, 6, 7, 2, 0, 8, 3, 9], [1, 6, 4, 7, 0, 2, 5, 8, 3, 9], [6, 1, 2, 0, 7, 3, 8, 9, 4, 5], [1, 7, 4, 5, 8, 0, 3, 6, 9, 2], [7, 4, 1, 5, 8, 3, 9, 2, 6, 0], [3, 5, 4, 8, 0, 6, 9, 1, 2, 7], [7, 1, 3, 6, 5, 0, 2, 8, 4, 9], [4, 7, 3, 0, 2, 9, 1, 5, 6, 8], [2, 7, 0, 4, 5, 3, 1, 6, 8, 9], [7, 2, 3, 5, 0, 4, 1, 6, 8, 9], [1, 7, 4, 2, 3, 0, 5, 8, 6, 9], [4, 1, 7, 5, 0, 6, 2, 8, 9, 3], [0, 5, 7, 2, 4, 6, 3, 8, 1, 9], [4, 5, 7, 8, 0, 3, 1, 2, 9, 6], [0, 4, 5, 7, 1, 3, 2, 6, 8, 9], [7, 2, 4, 5, 8, 0, 3, 1, 6, 9], [2, 3, 0, 1, 5, 4, 7, 9, 8, 6], [7, 4, 2, 5, 8, 0, 1, 6, 9, 3], [4, 7, 5, 1, 2, 0, 3, 8, 6, 9], [0, 5, 7, 8, 2, 1, 3, 4, 9, 6], [5, 7, 8, 2, 1, 0, 3, 4, 9, 6], [7, 2, 8, 1, 3, 4, 0, 6, 5, 9], [4, 5, 7, 8, 3, 0, 1, 2, 6, 9], [1, 4, 5, 7, 6, 0, 3, 8, 2, 9], [2, 7, 8, 5, 4, 3, 1, 0, 9, 6], [7, 4, 2, 5, 0, 8, 1, 3, 6, 9], [3, 0, 1, 9, 6, 2, 8, 7, 4, 5], [7, 2, 5, 8, 4, 0, 3, 1, 9, 6], [7, 4, 5, 0, 3, 1, 2, 8, 6, 9], [7, 2, 5, 4, 8, 1, 0, 3, 9, 6], [4, 7, 1, 8, 3, 5, 2, 6, 0, 9], [2, 5, 0, 8, 3, 9, 7, 6, 4, 1], [1, 5, 4, 7, 8, 3, 2, 9, 6, 0], [4, 7, 0, 5, 1, 8, 3, 2, 6, 9], [2, 8, 6, 7, 9, 1, 5, 0, 3, 4], [1, 4, 7, 5, 0, 2, 8, 3, 6, 9], [7, 2, 4, 0, 3, 5, 1, 8, 9, 6], [5, 7, 8, 2, 6, 3, 0, 4, 9, 1], [5, 7, 2, 4, 6, 1, 3, 8, 0, 9], [0, 1, 6, 8, 5, 2, 9, 7, 4, 3], [5, 7, 2, 0, 6, 8, 1, 3, 9, 4], [4, 1, 0, 7, 2, 5, 3, 8, 9, 6], [1, 0, 6, 7, 2, 8, 3, 5, 4, 9], [7, 0, 3, 5, 2, 4, 6, 9, 8, 1], [2, 3, 0, 6, 5, 1, 7, 4, 8, 9], [1, 3, 5, 0, 6, 7, 4, 9, 8, 2], [1, 4, 7, 3, 0, 8, 2, 6, 5, 9], [0, 7, 1, 4, 8, 2, 9, 3, 6, 5], [4, 5, 0, 8, 3, 6, 9, 2, 1, 7], [5, 0, 4, 8, 9, 3, 2, 1, 6, 7], [4, 7, 1, 2, 8, 3, 6, 5, 0, 9], [4, 2, 5, 3, 7, 1, 8, 6, 0, 9], [0, 5, 2, 3, 6, 1, 8, 9, 7, 4], [1, 2, 5, 8, 4, 6, 0, 3, 9, 7], [7, 5, 8, 4, 2, 1, 0, 3, 6, 9], [3, 0, 2, 5, 7, 6, 4, 8, 9, 1], [7, 2, 4, 5, 8, 6, 1, 9, 0, 3], [7, 2, 1, 3, 0, 5, 6, 8, 9, 4], [4, 5, 7, 1, 2, 3, 8, 0, 9, 6], [4, 2, 0, 8, 9, 5, 6, 1, 7, 3], [5, 6, 0, 8, 3, 7, 2, 1, 4, 9], [4, 5, 9, 8, 2, 7, 0, 3, 1, 6], [0, 2, 3, 1, 8, 6, 5, 9, 7, 4], [4, 0, 5, 3, 7, 6, 8, 9, 2, 1], [5, 0, 1, 2, 3, 6, 4, 8, 7, 9], [7, 4, 2, 5, 0, 8, 6, 1, 3, 9], [2, 7, 0, 3, 4, 5, 6, 8, 9, 1], [7, 2, 0, 3, 1, 8, 9, 6, 5, 4], [2, 3, 0, 5, 4, 8, 9, 6, 1, 7], [5, 0, 2, 7, 8, 3, 9, 6, 1, 4], [7, 2, 8, 5, 4, 1, 0, 3, 6, 9], [6, 8, 3, 0, 9, 7, 2, 5, 1, 4], [3, 0, 4, 8, 9, 7, 6, 2, 5, 1], [7, 0, 8, 2, 5, 1, 3, 9, 6, 4], [3, 0, 9, 1, 2, 8, 7, 5, 4, 6], [7, 2, 4, 5, 8, 9, 1, 3, 0, 6], [7, 0, 2, 4, 1, 8, 5, 9, 6, 3], [7, 3, 1, 2, 0, 5, 8, 6, 9, 4], [4, 2, 1, 0, 7, 5, 6, 8, 3, 9], [1, 5, 4, 3, 7, 8, 9, 6, 2, 0], [7, 0, 2, 3, 1, 6, 8, 5, 4, 9], [5, 7, 2, 6, 0, 1, 8, 3, 9, 4], [7, 1, 6, 0, 3, 8, 2, 9, 5, 4], [4, 5, 7, 1, 9, 2, 3, 8, 0, 6], [4, 7, 8, 2, 0, 3, 1, 6, 5, 9], [4, 7, 2, 5, 3, 8, 1, 0, 9, 6], [0, 7, 2, 6, 3, 5, 8, 1, 4, 9], [7, 5, 0, 3, 1, 8, 2, 9, 6, 4], [7, 2, 6, 0, 1, 8, 3, 9, 5, 4], [1, 4, 5, 7, 2, 9, 6, 8, 3, 0], [2, 8, 7, 0, 3, 1, 9, 6, 4, 5], [7, 4, 5, 3, 2, 0, 8, 9, 1, 6], [7, 0, 8, 2, 1, 3, 6, 9, 5, 4], [7, 1, 0, 3, 6, 2, 5, 8, 9, 4], [8, 5, 7, 2, 0, 9, 1, 6, 4, 3], [7, 5, 1, 4, 2, 8, 0, 3, 6, 9], [2, 7, 0, 8, 6, 3, 1, 9, 5, 4], [7, 2, 0, 5, 3, 1, 4, 8, 6, 9], [7, 0, 4, 1, 2, 5, 3, 8, 6, 9], [2, 0, 7, 1, 8, 5, 3, 9, 4, 6], [4, 1, 5, 7, 3, 6, 2, 0, 8, 9], [4, 2, 7, 0, 9, 3, 6, 5, 8, 1], [7, 0, 4, 2, 5, 8, 1, 9, 3, 6], [0, 2, 5, 4, 3, 8, 7, 9, 6, 1], [5, 4, 7, 2, 6, 1, 3, 8, 0, 9], [7, 4, 1, 3, 8, 0, 5, 2, 9, 6], [4, 7, 2, 8, 0, 3, 9, 1, 6, 5], [7, 3, 4, 5, 1, 8, 6, 2, 0, 9], [7, 4, 5, 1, 2, 0, 3, 6, 8, 9], [3, 7, 8, 5, 9, 1, 4, 6, 2, 
0], [1, 7, 2, 0, 6, 8, 9, 5, 3, 4], [5, 9, 7, 2, 8, 3, 0, 6, 4, 1], [0, 7, 1, 2, 4, 5, 8, 9, 6, 3], [1, 3, 6, 5, 0, 7, 2, 4, 9, 8], [2, 7, 3, 8, 0, 6, 9, 1, 4, 5], [7, 4, 5, 2, 1, 3, 0, 6, 9, 8], [3, 7, 5, 2, 9, 4, 8, 1, 6, 0], [0, 5, 7, 1, 6, 2, 4, 3, 8, 9], [6, 7, 2, 0, 8, 1, 9, 3, 5, 4], [7, 1, 5, 3, 4, 0, 8, 2, 6, 9], [7, 1, 4, 9, 5, 8, 2, 0, 3, 6], [7, 2, 1, 3, 5, 8, 6, 4, 0, 9], [4, 5, 7, 3, 0, 2, 8, 6, 1, 9], [3, 2, 8, 9, 1, 0, 6, 5, 7, 4], [5, 4, 0, 1, 3, 7, 2, 8, 9, 6], [4, 7, 3, 2, 5, 8, 6, 0, 9, 1], [7, 8, 2, 6, 1, 0, 3, 9, 5, 4], [0, 7, 5, 8, 2, 3, 1, 4, 6, 9], [4, 5, 7, 0, 8, 2, 1, 9, 6, 3], [4, 3, 7, 5, 9, 1, 0, 2, 6, 8], [7, 4, 1, 9, 6, 5, 0, 8, 2, 3], [4, 0, 7, 5, 8, 2, 1, 3, 6, 9], [5, 3, 7, 0, 4, 2, 8, 6, 1, 9], [7, 4, 2, 0, 6, 9, 8, 1, 5, 3], [1, 8, 4, 9, 2, 7, 3, 5, 6, 0], [4, 7, 5, 1, 3, 0, 2, 8, 6, 9], [6, 5, 1, 0, 3, 9, 8, 7, 2, 4], [4, 5, 0, 1, 3, 8, 7, 2, 9, 6], [7, 4, 5, 2, 3, 1, 8, 0, 9, 6], [7, 3, 2, 8, 6, 0, 1, 9, 5, 4], [7, 3, 2, 0, 8, 5, 6, 4, 1, 9], [7, 5, 0, 4, 2, 3, 8, 6, 1, 9], [5, 7, 0, 2, 6, 8, 4, 9, 1, 3], [0, 2, 6, 8, 4, 7, 9, 5, 3, 1], [3, 0, 1, 5, 4, 6, 9, 7, 2, 8], [7, 2, 0, 8, 3, 1, 6, 9, 5, 4], [2, 8, 5, 7, 1, 3, 9, 6, 0, 4], [0, 8, 4, 5, 7, 2, 1, 3, 9, 6], [4, 1, 0, 6, 5, 9, 8, 2, 7, 3], [7, 2, 8, 6, 9, 1, 0, 3, 5, 4], [7, 4, 1, 9, 2, 0, 3, 5, 8, 6], [0, 7, 2, 3, 5, 6, 8, 4, 1, 9], [3, 2, 7, 1, 8, 5, 6, 0, 9, 4], [7, 2, 0, 3, 5, 8, 9, 6, 1, 4], [4, 7, 1, 8, 3, 6, 2, 9, 0, 5], [3, 0, 8, 2, 9, 7, 5, 4, 6, 1], [0, 7, 2, 8, 9, 5, 6, 3, 1, 4], [2, 5, 8, 7, 6, 1, 3, 9, 0, 4], [7, 2, 4, 1, 0, 3, 5, 8, 9, 6], [0, 7, 2, 4, 5, 3, 1, 8, 9, 6], [8, 9, 4, 1, 2, 7, 0, 3, 5, 6], [0, 6, 1, 2, 3, 5, 9, 7, 8, 4], [3, 2, 0, 6, 7, 9, 5, 4, 8, 1], [8, 3, 0, 2, 6, 9, 5, 4, 7, 1], [7, 2, 4, 5, 0, 1, 8, 6, 3, 9], [4, 5, 0, 7, 1, 2, 3, 6, 8, 9], [4, 5, 0, 7, 2, 8, 9, 3, 1, 6], [5, 2, 3, 0, 7, 8, 1, 9, 6, 4], [6, 2, 7, 0, 3, 8, 9, 5, 1, 4], [5, 0, 7, 2, 3, 1, 4, 6, 8, 9], [7, 2, 5, 0, 8, 9, 6, 3, 1, 4], [7, 2, 8, 5, 0, 3, 1, 9, 6, 4], [3, 0, 7, 1, 2, 8, 4, 5, 6, 9], [1, 2, 7, 0, 3, 6, 8, 5, 4, 9], [1, 0, 3, 7, 5, 2, 8, 6, 4, 9], [7, 4, 5, 8, 2, 1, 0, 3, 6, 9], [2, 3, 5, 4, 0, 7, 9, 8, 6, 1], [7, 1, 2, 4, 5, 6, 8, 0, 3, 9], [0, 6, 1, 9, 3, 2, 4, 5, 7, 8], [5, 4, 7, 1, 8, 0, 3, 6, 2, 9], [7, 4, 5, 1, 2, 0, 8, 6, 3, 9], [4, 7, 2, 5, 8, 0, 3, 1, 6, 9], [5, 0, 1, 7, 4, 2, 8, 6, 9, 3], [0, 5, 4, 1, 7, 6, 3, 2, 8, 9], [7, 3, 5, 2, 8, 0, 1, 4, 6, 9], [7, 4, 2, 3, 0, 1, 8, 6, 9, 5], [5, 2, 7, 6, 1, 3, 8, 4, 0, 9], [0, 6, 3, 9, 1, 5, 8, 2, 7, 4], [7, 8, 2, 1, 0, 6, 5, 3, 9, 4], [1, 9, 7, 4, 2, 8, 0, 3, 5, 6], [6, 2, 7, 5, 4, 0, 8, 3, 1, 9], [0, 7, 1, 3, 2, 8, 6, 5, 9, 4], [7, 4, 9, 2, 6, 0, 5, 3, 8, 1], [7, 5, 3, 0, 6, 1, 2, 8, 9, 4], [4, 8, 3, 5, 7, 9, 6, 2, 0, 1], [4, 7, 2, 1, 6, 0, 8, 3, 5, 9], [4, 7, 3, 5, 2, 8, 1, 6, 0, 9], [9, 6, 8, 1, 2, 3, 0, 5, 7, 4], [5, 7, 4, 2, 9, 1, 3, 0, 8, 6], [6, 0, 3, 5, 7, 4, 2, 8, 1, 9], [5, 3, 8, 7, 2, 0, 9, 1, 6, 4], [4, 5, 7, 1, 6, 0, 2, 3, 8, 9], [7, 2, 4, 5, 1, 0, 8, 6, 9, 3], [3, 0, 9, 7, 2, 8, 6, 1, 5, 4], [4, 6, 5, 3, 2, 0, 1, 8, 9, 7], [7, 1, 4, 5, 0, 2, 8, 9, 3, 6], [2, 0, 1, 7, 4, 3, 8, 5, 6, 9], [7, 1, 4, 2, 0, 5, 8, 3, 6, 9], [4, 0, 7, 3, 9, 6, 8, 1, 5, 2], [7, 8, 2, 1, 3, 0, 6, 9, 4, 5], [7, 4, 5, 2, 0, 3, 8, 9, 1, 6], [7, 2, 6, 1, 3, 8, 5, 9, 4, 0], [1, 7, 2, 0, 3, 8, 5, 9, 6, 4], [5, 7, 2, 8, 1, 9, 0, 3, 6, 4], [7, 5, 2, 3, 4, 0, 8, 1, 9, 6], [2, 8, 7, 0, 6, 3, 9, 1, 4, 5], [1, 7, 2, 8, 3, 0, 6, 9, 5, 4], [7, 8, 2, 5, 0, 4, 1, 3, 6, 9], [1, 3, 0, 7, 2, 9, 6, 8, 4, 5], [4, 7, 3, 0, 1, 6, 2, 5, 8, 9], [4, 7, 5, 0, 2, 1, 8, 6, 3, 
9], [1, 0, 3, 9, 6, 5, 8, 7, 2, 4], [5, 2, 0, 9, 8, 4, 7, 3, 6, 1], [1, 7, 0, 4, 5, 2, 6, 8, 3, 9], [7, 4, 0, 2, 1, 5, 8, 3, 6, 9], [4, 5, 0, 1, 6, 3, 7, 2, 8, 9], [5, 0, 3, 4, 2, 7, 8, 6, 9, 1], [5, 6, 0, 7, 2, 8, 1, 4, 9, 3], [8, 1, 4, 5, 7, 0, 6, 2, 3, 9], [2, 1, 0, 5, 7, 8, 3, 9, 4, 6], [4, 5, 8, 2, 7, 3, 1, 9, 0, 6], [1, 0, 5, 3, 4, 7, 6, 2, 9, 8], [7, 0, 4, 2, 1, 3, 8, 6, 9, 5], [7, 4, 5, 0, 2, 8, 1, 9, 3, 6], [4, 1, 3, 5, 6, 0, 9, 7, 8, 2], [7, 0, 4, 2, 5, 3, 8, 1, 9, 6], [4, 5, 7, 1, 2, 0, 6, 3, 8, 9], [7, 2, 3, 8, 9, 0, 6, 5, 4, 1], [7, 0, 2, 4, 6, 3, 1, 8, 5, 9], [5, 7, 6, 4, 3, 2, 9, 8, 0, 1], [8, 1, 5, 4, 6, 7, 0, 2, 3, 9], [7, 1, 5, 2, 4, 8, 6, 9, 0, 3], [0, 2, 7, 9, 6, 5, 3, 8, 4, 1], [4, 0, 7, 2, 8, 1, 6, 9, 3, 5], [4, 7, 1, 5, 0, 8, 6, 2, 3, 9], [6, 9, 3, 0, 7, 2, 5, 1, 8, 4], [2, 1, 7, 3, 8, 9, 5, 6, 0, 4], [6, 5, 7, 4, 2, 1, 3, 0, 8, 9], [7, 5, 4, 6, 3, 0, 2, 8, 1, 9], [1, 3, 6, 0, 4, 5, 7, 8, 2, 9], [7, 3, 0, 2, 1, 4, 6, 8, 9, 5], [4, 7, 2, 0, 3, 8, 5, 9, 6, 1], [2, 0, 1, 3, 5, 7, 8, 6, 9, 4], [1, 0, 5, 6, 4, 2, 7, 9, 8, 3], [1, 3, 5, 2, 7, 0, 6, 4, 8, 9], [3, 5, 2, 4, 1, 8, 7, 0, 9, 6], [4, 5, 0, 7, 2, 8, 1, 9, 6, 3], [4, 1, 7, 5, 2, 8, 6, 3, 0, 9], [4, 7, 1, 0, 5, 3, 2, 8, 6, 9], [2, 5, 7, 6, 1, 0, 8, 3, 9, 4], [1, 0, 5, 2, 7, 8, 6, 9, 3, 4], [9, 6, 3, 0, 8, 2, 1, 4, 7, 5], [5, 0, 4, 3, 8, 1, 9, 7, 2, 6], [3, 0, 2, 7, 5, 8, 6, 9, 4, 1], [7, 4, 5, 1, 2, 8, 0, 6, 9, 3], [7, 4, 1, 5, 0, 2, 6, 8, 3, 9], [7, 5, 4, 0, 1, 2, 6, 3, 8, 9], [5, 7, 6, 3, 0, 2, 8, 9, 4, 1], [4, 1, 5, 8, 0, 9, 2, 7, 3, 6], [3, 2, 6, 0, 9, 8, 7, 1, 5, 4], [7, 0, 5, 2, 6, 8, 9, 1, 3, 4], [7, 4, 3, 1, 0, 2, 6, 5, 8, 9], [1, 4, 3, 7, 6, 5, 2, 9, 0, 8], [9, 0, 7, 3, 2, 8, 4, 1, 6, 5], [1, 5, 2, 4, 6, 7, 8, 0, 3, 9], [7, 2, 8, 4, 1, 3, 0, 5, 9, 6], [4, 5, 3, 1, 0, 7, 2, 6, 8, 9], [7, 2, 9, 1, 8, 3, 6, 5, 0, 4], [4, 1, 7, 5, 3, 2, 8, 0, 6, 9], [6, 2, 7, 4, 0, 5, 1, 8, 9, 3], [7, 2, 8, 4, 1, 6, 0, 9, 3, 5], [0, 7, 1, 2, 3, 5, 8, 6, 4, 9], [4, 0, 1, 9, 8, 7, 2, 5, 3, 6], [1, 5, 4, 9, 0, 3, 8, 7, 2, 6], [8, 5, 4, 7, 2, 1, 6, 3, 9, 0], [4, 1, 7, 3, 8, 2, 0, 5, 9, 6], [8, 3, 7, 1, 0, 6, 2, 9, 4, 5], [1, 0, 6, 5, 4, 9, 3, 8, 2, 7], [4, 5, 7, 2, 0, 3, 1, 8, 9, 6], [4, 5, 7, 2, 0, 3, 1, 8, 6, 9], [7, 3, 5, 0, 2, 4, 9, 8, 6, 1], [7, 3, 2, 6, 0, 9, 8, 4, 1, 5], [7, 2, 5, 0, 3, 8, 9, 4, 6, 1], [5, 8, 9, 3, 6, 2, 1, 0, 7, 4], [7, 3, 8, 0, 2, 9, 1, 6, 5, 4], [4, 7, 0, 1, 5, 2, 8, 3, 6, 9], [7, 1, 5, 4, 2, 8, 3, 0, 6, 9], [4, 5, 7, 2, 0, 8, 1, 3, 6, 9], [1, 3, 2, 7, 6, 8, 4, 0, 5, 9], [4, 5, 7, 0, 2, 1, 6, 9, 8, 3], [5, 7, 3, 4, 1, 2, 0, 6, 8, 9], [7, 4, 6, 1, 2, 8, 0, 9, 3, 5], [0, 6, 8, 3, 1, 7, 2, 9, 5, 4], [1, 0, 4, 5, 3, 7, 8, 2, 6, 9], [5, 9, 3, 7, 1, 4, 0, 2, 8, 6], [7, 5, 0, 2, 3, 1, 9, 6, 8, 4], [7, 2, 0, 5, 3, 4, 8, 6, 1, 9], [1, 0, 6, 9, 3, 8, 2, 7, 4, 5], [2, 7, 1, 4, 5, 9, 0, 6, 8, 3], [7, 4, 5, 0, 3, 1, 2, 6, 8, 9], [7, 0, 3, 6, 2, 9, 8, 4, 5, 1], [7, 4, 2, 8, 5, 0, 1, 3, 6, 9], [7, 2, 1, 5, 8, 4, 6, 3, 9, 0], [5, 2, 8, 3, 7, 1, 0, 6, 9, 4], [7, 1, 6, 4, 5, 8, 2, 0, 3, 9], [4, 0, 7, 2, 1, 8, 9, 6, 5, 3], [1, 6, 3, 0, 5, 8, 9, 7, 4, 2], [5, 4, 7, 8, 2, 6, 3, 0, 9, 1], [0, 3, 6, 5, 2, 9, 1, 8, 7, 4], [7, 2, 4, 6, 0, 9, 8, 3, 1, 5], [0, 6, 2, 3, 9, 8, 5, 7, 4, 1], [7, 2, 8, 1, 4, 9, 3, 5, 6, 0], [7, 4, 2, 5, 1, 3, 0, 8, 6, 9], [0, 6, 3, 7, 2, 8, 5, 9, 4, 1], [7, 0, 3, 2, 8, 4, 5, 6, 1, 9], [5, 2, 6, 4, 7, 1, 0, 8, 3, 9], [7, 2, 5, 4, 0, 1, 9, 8, 6, 3], [2, 7, 8, 6, 4, 3, 0, 1, 5, 9], [7, 6, 8, 4, 2, 1, 5, 9, 3, 0], [2, 8, 7, 6, 3, 0, 9, 5, 1, 4], [9, 8, 4, 5, 1, 7, 2, 3, 0, 6], [7, 1, 0, 8, 4, 3, 2, 5, 9, 
6], [7, 2, 3, 5, 0, 6, 1, 8, 4, 9], [2, 7, 1, 8, 0, 3, 5, 4, 9, 6], [7, 0, 4, 1, 8, 3, 5, 2, 9, 6], [7, 2, 4, 5, 0, 8, 6, 1, 9, 3], [1, 7, 4, 8, 5, 2, 9, 6, 3, 0], [2, 8, 5, 4, 6, 1, 7, 0, 3, 9], [2, 3, 5, 8, 0, 7, 6, 1, 9, 4], [3, 4, 0, 7, 2, 5, 8, 1, 9, 6], [7, 1, 8, 0, 2, 3, 6, 9, 5, 4], [1, 4, 0, 6, 3, 2, 8, 7, 9, 5], [5, 0, 6, 7, 3, 4, 2, 1, 8, 9], [2, 7, 8, 6, 3, 9, 1, 0, 5, 4], [7, 1, 2, 8, 0, 3, 6, 9, 4, 5], [4, 7, 5, 2, 1, 8, 0, 9, 3, 6], [4, 2, 7, 1, 0, 8, 9, 6, 3, 5], [1, 5, 4, 3, 8, 7, 2, 0, 9, 6], [2, 1, 4, 9, 5, 6, 8, 7, 0, 3], [1, 0, 3, 7, 8, 2, 9, 4, 6, 5], [6, 0, 7, 2, 9, 1, 4, 5, 3, 8], [7, 8, 9, 2, 0, 5, 3, 4, 6, 1], [0, 8, 2, 1, 7, 3, 4, 9, 6, 5], [3, 0, 7, 2, 4, 5, 1, 6, 9, 8], [7, 4, 5, 2, 0, 8, 3, 9, 1, 6], [1, 8, 5, 7, 2, 0, 3, 6, 9, 4], [7, 5, 1, 8, 6, 9, 2, 0, 3, 4], [1, 5, 2, 7, 6, 0, 8, 9, 3, 4], [2, 7, 1, 8, 4, 9, 6, 5, 0, 3], [7, 0, 2, 9, 8, 3, 1, 6, 4, 5], [0, 3, 6, 1, 9, 5, 8, 2, 7, 4], [7, 2, 8, 5, 1, 0, 3, 6, 4, 9], [1, 0, 8, 3, 6, 2, 7, 9, 5, 4], [5, 7, 4, 2, 0, 3, 1, 9, 6, 8], [7, 2, 3, 0, 4, 1, 9, 5, 8, 6], [5, 2, 8, 7, 0, 6, 3, 1, 9, 4], [7, 8, 2, 1, 5, 4, 6, 0, 3, 9], [4, 7, 3, 5, 2, 1, 8, 9, 0, 6], [7, 2, 5, 6, 3, 8, 0, 1, 4, 9], [7, 4, 5, 0, 8, 1, 2, 6, 9, 3], [7, 2, 8, 3, 9, 0, 6, 1, 5, 4], [7, 2, 8, 0, 3, 6, 1, 4, 5, 9], [8, 2, 4, 5, 7, 3, 9, 1, 0, 6], [0, 7, 6, 8, 5, 1, 9, 2, 3, 4], [3, 7, 8, 6, 5, 2, 1, 0, 4, 9], [7, 2, 0, 5, 9, 8, 1, 6, 4, 3], [4, 0, 1, 5, 8, 3, 7, 2, 9, 6], [3, 4, 7, 0, 2, 5, 8, 6, 1, 9], [3, 8, 1, 2, 7, 9, 5, 0, 6, 4], [4, 2, 1, 0, 3, 8, 9, 5, 6, 7], [5, 7, 1, 4, 0, 2, 8, 6, 3, 9], [4, 0, 5, 2, 7, 3, 9, 8, 1, 6], [7, 0, 2, 3, 6, 8, 1, 9, 5, 4], [5, 3, 0, 6, 9, 1, 7, 2, 4, 8], [4, 5, 7, 0, 1, 2, 8, 6, 3, 9], [7, 2, 0, 4, 5, 3, 8, 6, 1, 9], [0, 6, 7, 2, 1, 3, 5, 4, 8, 9], [6, 7, 5, 2, 8, 4, 1, 9, 0, 3], [4, 0, 7, 5, 2, 8, 3, 6, 9, 1], [7, 5, 4, 0, 2, 3, 6, 8, 9, 1], [7, 4, 0, 1, 2, 5, 8, 6, 3, 9], [7, 0, 2, 3, 8, 1, 6, 4, 5, 9], [8, 3, 9, 5, 1, 2, 0, 4, 6, 7], [1, 5, 4, 7, 0, 3, 2, 9, 8, 6], [4, 1, 7, 2, 0, 9, 8, 6, 3, 5], [7, 4, 8, 0, 3, 2, 1, 9, 5, 6], [7, 2, 8, 9, 5, 4, 1, 0, 6, 3], [1, 7, 2, 8, 3, 6, 0, 9, 5, 4], [2, 7, 3, 0, 8, 9, 6, 1, 5, 4], [4, 5, 2, 0, 1, 3, 7, 8, 6, 9], [5, 7, 4, 1, 0, 2, 3, 8, 9, 6], [2, 8, 3, 6, 1, 0, 5, 4, 9, 7], [7, 5, 6, 8, 2, 4, 9, 1, 3, 0], [2, 7, 5, 4, 3, 1, 0, 6, 8, 9], [7, 1, 4, 2, 5, 3, 8, 6, 9, 0], [7, 0, 1, 5, 2, 4, 3, 8, 6, 9], [7, 0, 5, 2, 8, 9, 1, 3, 6, 4], [5, 7, 0, 2, 6, 8, 9, 1, 3, 4], [7, 2, 8, 9, 3, 5, 0, 6, 1, 4], [7, 5, 4, 1, 2, 0, 6, 8, 9, 3], [7, 4, 1, 8, 0, 6, 5, 2, 3, 9], [7, 2, 0, 1, 8, 4, 6, 3, 9, 5], [7, 5, 2, 1, 8, 0, 3, 9, 6, 4], [1, 5, 7, 8, 2, 6, 3, 0, 9, 4], [1, 0, 2, 8, 3, 7, 4, 6, 9, 5], [5, 4, 0, 1, 3, 6, 9, 7, 2, 8], [4, 7, 2, 0, 3, 5, 8, 9, 1, 6], [7, 5, 4, 2, 8, 3, 0, 6, 9, 1], [0, 6, 2, 5, 8, 9, 7, 4, 3, 1], [0, 6, 7, 5, 1, 3, 4, 2, 8, 9], [7, 5, 4, 0, 2, 3, 1, 9, 8, 6], [1, 2, 6, 0, 8, 3, 5, 9, 7, 4], [7, 5, 0, 1, 2, 8, 3, 9, 6, 4], [7, 4, 0, 6, 1, 2, 3, 8, 9, 5], [4, 0, 1, 2, 5, 3, 8, 7, 9, 6], [7, 2, 5, 4, 1, 0, 3, 8, 9, 6], [7, 0, 3, 6, 5, 1, 8, 9, 2, 4], [2, 9, 6, 0, 8, 7, 4, 3, 5, 1], [7, 2, 8, 6, 9, 5, 0, 1, 3, 4], [2, 8, 7, 3, 0, 1, 4, 5, 9, 6], [4, 5, 0, 7, 8, 6, 2, 9, 3, 1], [7, 1, 4, 5, 6, 2, 8, 0, 9, 3], [5, 7, 2, 6, 3, 0, 1, 8, 4, 9], [4, 0, 7, 1, 5, 6, 3, 2, 8, 9], [5, 4, 2, 6, 7, 1, 3, 8, 0, 9], [6, 8, 3, 0, 7, 1, 2, 9, 5, 4], [4, 2, 0, 5, 3, 9, 8, 1, 6, 7], [7, 5, 4, 8, 0, 2, 6, 3, 9, 1], [4, 5, 7, 1, 0, 8, 2, 3, 6, 9], [7, 8, 5, 3, 4, 2, 6, 0, 9, 1], [1, 2, 5, 3, 7, 0, 8, 6, 9, 4], [4, 7, 1, 0, 2, 5, 8, 3, 9, 6], [5, 4, 3, 7, 2, 0, 8, 1, 9, 
6], [5, 4, 3, 7, 6, 2, 0, 1, 9, 8], [1, 6, 3, 5, 0, 7, 4, 2, 9, 8], [7, 5, 0, 6, 9, 3, 2, 8, 4, 1], [7, 4, 0, 2, 3, 8, 9, 6, 5, 1], [7, 2, 8, 9, 6, 0, 5, 4, 3, 1], [7, 2, 0, 4, 3, 1, 8, 6, 5, 9], [7, 2, 0, 6, 5, 8, 4, 1, 3, 9], [1, 5, 4, 6, 2, 7, 9, 8, 3, 0], [7, 0, 3, 2, 6, 8, 9, 1, 5, 4], [4, 5, 1, 2, 0, 7, 6, 8, 3, 9], [7, 2, 4, 8, 1, 0, 3, 6, 5, 9], [3, 2, 7, 0, 8, 6, 9, 4, 1, 5], [4, 1, 6, 8, 2, 7, 0, 3, 5, 9], [4, 1, 2, 8, 5, 0, 6, 3, 9, 7], [5, 4, 0, 7, 3, 8, 2, 1, 9, 6], [0, 5, 7, 4, 2, 3, 6, 8, 1, 9], [2, 8, 7, 1, 3, 5, 4, 0, 6, 9], [0, 4, 1, 5, 7, 2, 9, 3, 8, 6], [7, 2, 8, 9, 3, 0, 6, 1, 4, 5], [2, 3, 1, 8, 7, 4, 5, 9, 6, 0], [3, 7, 2, 0, 6, 9, 8, 1, 4, 5], [5, 9, 6, 0, 8, 3, 7, 2, 1, 4], [5, 4, 1, 7, 8, 2, 3, 0, 9, 6], [7, 5, 1, 4, 2, 8, 0, 6, 3, 9], [6, 2, 1, 0, 7, 5, 4, 8, 3, 9], [8, 0, 4, 5, 7, 2, 1, 3, 9, 6], [4, 7, 0, 2, 5, 8, 1, 3, 9, 6], [7, 0, 3, 2, 8, 6, 9, 4, 1, 5], [7, 2, 8, 1, 9, 0, 6, 3, 5, 4], [4, 8, 5, 3, 7, 1, 0, 6, 2, 9], [4, 7, 5, 0, 1, 6, 9, 8, 3, 2], [5, 1, 0, 3, 6, 7, 8, 9, 2, 4], [2, 8, 3, 9, 7, 0, 6, 1, 5, 4], [7, 8, 5, 4, 1, 2, 6, 3, 0, 9], [8, 7, 2, 0, 5, 6, 4, 1, 3, 9], [7, 1, 2, 8, 6, 4, 5, 0, 3, 9], [7, 4, 1, 5, 3, 0, 2, 6, 9, 8], [7, 0, 4, 3, 8, 6, 1, 2, 9, 5], [2, 0, 7, 3, 5, 4, 1, 6, 8, 9], [7, 4, 2, 5, 9, 3, 0, 1, 8, 6], [6, 1, 3, 2, 7, 0, 8, 4, 9, 5], [1, 7, 2, 3, 5, 4, 8, 0, 9, 6], [7, 5, 2, 1, 3, 0, 6, 4, 8, 9], [4, 0, 1, 3, 9, 8, 2, 7, 5, 6], [7, 5, 1, 3, 6, 2, 0, 4, 8, 9], [1, 5, 3, 6, 7, 2, 8, 0, 9, 4], [0, 3, 2, 1, 4, 5, 7, 8, 9, 6], [4, 5, 3, 7, 6, 2, 0, 8, 1, 9], [4, 1, 0, 3, 5, 7, 2, 8, 6, 9], [7, 4, 5, 0, 3, 1, 2, 8, 9, 6], [0, 8, 2, 6, 7, 3, 9, 1, 5, 4], [4, 7, 5, 0, 3, 1, 2, 8, 9, 6], [7, 8, 1, 2, 3, 4, 5, 6, 9, 0], [7, 1, 6, 0, 4, 9, 3, 5, 2, 8], [7, 2, 3, 0, 1, 8, 5, 9, 4, 6], [7, 2, 8, 9, 4, 3, 6, 5, 1, 0], [4, 7, 5, 1, 6, 9, 0, 3, 8, 2], [7, 1, 4, 5, 0, 3, 2, 8, 6, 9], [0, 7, 6, 3, 8, 1, 9, 2, 4, 5], [7, 2, 0, 5, 1, 8, 3, 6, 4, 9], [7, 2, 4, 1, 8, 0, 5, 3, 9, 6], [0, 1, 8, 7, 2, 6, 3, 4, 5, 9], [7, 4, 5, 0, 8, 2, 3, 9, 1, 6], [7, 3, 2, 8, 0, 6, 9, 1, 5, 4], [1, 7, 0, 6, 8, 2, 5, 4, 3, 9], [4, 5, 7, 6, 1, 0, 3, 2, 8, 9], [8, 3, 4, 2, 7, 0, 9, 6, 5, 1], [7, 5, 2, 8, 3, 9, 0, 6, 1, 4], [7, 2, 9, 8, 5, 6, 1, 0, 3, 4], [5, 8, 7, 9, 2, 0, 3, 6, 1, 4], [7, 2, 4, 0, 3, 8, 6, 1, 9, 5], [0, 6, 1, 3, 2, 8, 9, 7, 4, 5], [0, 3, 9, 1, 6, 8, 7, 4, 2, 5], [7, 5, 4, 0, 8, 3, 2, 1, 6, 9], [0, 1, 3, 8, 6, 2, 9, 5, 7, 4], [9, 0, 3, 6, 5, 2, 7, 8, 1, 4], [6, 2, 1, 7, 0, 8, 4, 5, 3, 9], [7, 3, 1, 0, 8, 5, 2, 6, 9, 4], [2, 6, 3, 1, 4, 0, 5, 8, 7, 9], [5, 7, 2, 4, 8, 9, 0, 6, 3, 1], [2, 7, 6, 8, 9, 3, 0, 4, 1, 5], [7, 4, 2, 1, 8, 0, 3, 5, 9, 6], [6, 7, 2, 0, 8, 5, 4, 9, 3, 1], [4, 7, 2, 5, 8, 0, 1, 3, 9, 6], [7, 4, 0, 1, 3, 6, 2, 8, 5, 9], [4, 2, 7, 8, 0, 1, 3, 6, 5, 9], [5, 1, 2, 8, 0, 3, 6, 7, 9, 4], [0, 3, 2, 5, 9, 7, 8, 1, 6, 4], [7, 1, 2, 3, 5, 8, 6, 0, 9, 4], [1, 7, 6, 2, 0, 4, 5, 3, 9, 8], [7, 1, 5, 4, 8, 3, 0, 6, 2, 9], [8, 0, 3, 4, 7, 6, 2, 5, 1, 9], [1, 5, 7, 6, 4, 3, 8, 2, 0, 9], [4, 3, 2, 8, 7, 0, 5, 6, 9, 1], [0, 3, 5, 4, 1, 7, 8, 9, 2, 6], [7, 0, 5, 2, 3, 8, 1, 6, 9, 4], [2, 7, 8, 0, 1, 3, 9, 6, 4, 5], [4, 5, 1, 7, 8, 2, 0, 3, 6, 9], [2, 4, 8, 7, 9, 0, 3, 6, 1, 5], [7, 1, 8, 2, 0, 6, 4, 5, 3, 9], [7, 2, 6, 0, 1, 8, 3, 9, 4, 5], [5, 0, 7, 8, 2, 9, 3, 6, 1, 4], [7, 2, 5, 8, 0, 6, 1, 9, 3, 4], [7, 3, 5, 2, 6, 8, 4, 1, 0, 9], [0, 7, 2, 8, 4, 1, 5, 9, 3, 6], [4, 1, 7, 5, 0, 2, 8, 6, 3, 9], [2, 0, 1, 7, 8, 3, 5, 9, 6, 4], [7, 4, 1, 5, 0, 2, 8, 9, 6, 3], [0, 2, 7, 1, 4, 9, 5, 8, 3, 6], [5, 4, 0, 3, 7, 2, 8, 9, 6, 1], [7, 2, 0, 3, 5, 8, 4, 6, 1, 
9], [5, 4, 7, 1, 8, 0, 3, 2, 9, 6], [2, 1, 0, 6, 3, 8, 5, 7, 9, 4], [0, 6, 5, 9, 2, 3, 7, 8, 4, 1], [4, 5, 3, 0, 7, 1, 2, 8, 9, 6], [4, 7, 5, 2, 3, 0, 8, 1, 6, 9], [2, 7, 3, 0, 1, 8, 9, 4, 6, 5], [5, 7, 9, 0, 4, 3, 1, 8, 2, 6], [5, 1, 0, 9, 7, 3, 8, 2, 4, 6], [7, 0, 1, 2, 4, 5, 3, 8, 9, 6], [7, 4, 2, 0, 1, 8, 5, 6, 3, 9], [8, 2, 0, 1, 3, 5, 4, 6, 7, 9], [7, 4, 2, 5, 1, 8, 6, 0, 3, 9], [4, 7, 5, 3, 2, 8, 0, 1, 9, 6], [7, 3, 4, 0, 8, 5, 1, 9, 6, 2], [3, 8, 2, 1, 9, 0, 6, 5, 4, 7], [3, 0, 5, 8, 9, 2, 7, 6, 1, 4], [7, 2, 1, 4, 8, 0, 5, 6, 9, 3], [1, 7, 5, 2, 8, 3, 0, 6, 4, 9], [0, 7, 2, 3, 1, 4, 8, 6, 9, 5], [7, 2, 0, 8, 6, 9, 1, 3, 5, 4], [7, 0, 1, 5, 8, 2, 9, 6, 4, 3], [4, 7, 0, 2, 3, 5, 1, 8, 6, 9], [7, 5, 4, 2, 1, 8, 0, 9, 6, 3], [0, 2, 8, 6, 9, 1, 3, 5, 4, 7], [6, 4, 1, 0, 5, 7, 3, 2, 8, 9], [4, 7, 0, 5, 2, 3, 8, 9, 1, 6], [7, 0, 1, 5, 3, 2, 8, 9, 4, 6], [0, 3, 4, 2, 7, 5, 1, 8, 6, 9], [7, 2, 8, 4, 5, 9, 0, 6, 1, 3], [7, 1, 2, 5, 4, 0, 8, 3, 6, 9], [4, 2, 5, 8, 9, 6, 3, 0, 7, 1], [1, 7, 0, 4, 5, 3, 6, 2, 9, 8], [0, 3, 2, 5, 8, 7, 9, 1, 6, 4], [7, 2, 8, 1, 0, 6, 3, 5, 9, 4], [2, 7, 8, 0, 4, 5, 1, 3, 9, 6], [7, 5, 3, 0, 1, 2, 6, 9, 8, 4], [2, 3, 5, 4, 7, 1, 0, 8, 9, 6], [0, 7, 2, 1, 6, 3, 8, 9, 4, 5], [7, 2, 1, 5, 6, 8, 9, 4, 3, 0], [0, 7, 2, 8, 3, 1, 9, 6, 5, 4], [4, 2, 7, 5, 3, 0, 8, 9, 6, 1], [3, 0, 9, 8, 2, 1, 7, 6, 5, 4], [4, 7, 0, 6, 1, 3, 2, 8, 9, 5], [7, 4, 2, 8, 5, 0, 3, 1, 9, 6], [7, 1, 8, 2, 0, 5, 4, 6, 9, 3], [7, 6, 4, 0, 3, 2, 8, 1, 9, 5], [4, 7, 2, 0, 3, 5, 1, 6, 8, 9], [5, 7, 2, 1, 8, 6, 0, 3, 9, 4], [0, 4, 7, 5, 2, 3, 6, 1, 8, 9], [8, 2, 3, 1, 0, 9, 6, 5, 4, 7], [7, 4, 2, 1, 0, 8, 3, 6, 9, 5], [4, 7, 5, 2, 6, 1, 0, 3, 8, 9], [7, 0, 5, 2, 8, 3, 4, 9, 1, 6], [2, 8, 9, 5, 6, 0, 3, 7, 1, 4], [6, 8, 2, 7, 3, 4, 9, 5, 1, 0], [1, 7, 4, 5, 6, 0, 3, 8, 2, 9], [7, 8, 2, 6, 5, 3, 4, 0, 9, 1], [7, 2, 1, 0, 8, 6, 9, 5, 4, 3], [7, 2, 8, 1, 4, 0, 5, 3, 9, 6], [0, 2, 3, 5, 8, 6, 7, 9, 4, 1], [1, 7, 4, 5, 8, 2, 3, 6, 9, 0], [7, 2, 5, 4, 0, 8, 9, 6, 3, 1], [5, 4, 7, 3, 0, 6, 8, 2, 1, 9], [0, 2, 6, 1, 8, 9, 7, 5, 4, 3], [7, 4, 5, 0, 1, 8, 2, 3, 6, 9], [1, 8, 5, 4, 9, 2, 0, 6, 7, 3], [7, 5, 8, 4, 2, 0, 3, 9, 1, 6], [0, 6, 1, 3, 4, 7, 9, 8, 2, 5], [4, 7, 2, 0, 3, 6, 8, 9, 5, 1], [6, 5, 3, 0, 2, 4, 1, 8, 9, 7], [7, 1, 2, 5, 4, 8, 3, 6, 0, 9], [0, 2, 1, 7, 3, 8, 9, 4, 5, 6], [6, 0, 2, 3, 4, 8, 9, 7, 5, 1], [6, 0, 2, 3, 1, 5, 8, 4, 9, 7], [7, 2, 8, 6, 9, 1, 3, 0, 4, 5], [7, 0, 3, 4, 1, 5, 2, 8, 9, 6], [1, 5, 6, 3, 0, 9, 7, 8, 2, 4], [8, 7, 5, 3, 2, 0, 6, 9, 1, 4], [5, 2, 0, 3, 1, 7, 4, 9, 8, 6], [2, 7, 1, 5, 0, 8, 4, 6, 3, 9], [7, 2, 4, 1, 8, 5, 0, 3, 6, 9], [7, 5, 3, 2, 4, 0, 8, 9, 6, 1], [1, 4, 5, 7, 0, 2, 6, 3, 9, 8], [5, 8, 7, 3, 0, 2, 9, 1, 4, 6], [5, 7, 3, 4, 0, 8, 2, 1, 9, 6], [3, 4, 0, 5, 9, 7, 2, 1, 8, 6], [6, 3, 9, 0, 4, 1, 2, 8, 7, 5], [4, 1, 7, 8, 6, 5, 3, 9, 2, 0], [0, 8, 2, 7, 6, 9, 3, 1, 5, 4], [3, 0, 1, 5, 2, 7, 6, 9, 8, 4], [7, 2, 5, 4, 3, 0, 8, 1, 6, 9], [1, 3, 8, 0, 2, 7, 5, 6, 9, 4], [8, 6, 0, 2, 3, 5, 9, 1, 7, 4], [7, 1, 5, 6, 2, 9, 8, 3, 4, 0], [2, 0, 3, 8, 5, 7, 1, 9, 6, 4], [5, 0, 3, 6, 1, 2, 8, 7, 9, 4], [7, 4, 1, 0, 5, 8, 2, 3, 6, 9], [3, 0, 5, 1, 6, 7, 2, 8, 9, 4], [4, 5, 0, 2, 8, 7, 9, 3, 1, 6], [7, 5, 2, 6, 3, 0, 8, 9, 1, 4], [5, 4, 7, 2, 1, 8, 3, 9, 6, 0], [5, 7, 0, 4, 1, 2, 9, 8, 6, 3], [1, 7, 2, 3, 0, 6, 8, 9, 4, 5], [2, 7, 8, 0, 3, 6, 1, 9, 4, 5], [1, 7, 2, 5, 0, 8, 9, 6, 3, 4], [4, 7, 2, 5, 6, 0, 8, 3, 1, 9], [7, 5, 2, 3, 0, 9, 8, 6, 1, 4], [7, 4, 3, 1, 2, 5, 8, 0, 9, 6], [4, 1, 7, 0, 5, 3, 8, 2, 9, 6], [3, 8, 0, 2, 7, 9, 6, 1, 5, 4], [7, 2, 3, 5, 0, 8, 4, 9, 1, 
6], [1, 4, 5, 7, 6, 2, 0, 3, 8, 9], [5, 7, 2, 4, 1, 8, 6, 0, 3, 9], [5, 6, 0, 2, 7, 8, 3, 9, 1, 4], [1, 6, 9, 2, 8, 0, 3, 7, 4, 5], [7, 8, 9, 2, 1, 3, 6, 0, 4, 5], [7, 4, 5, 2, 8, 1, 0, 6, 3, 9], [3, 0, 7, 5, 1, 2, 8, 4, 9, 6], [7, 6, 5, 2, 4, 8, 9, 0, 1, 3], [2, 7, 8, 1, 0, 3, 6, 9, 5, 4], [3, 6, 2, 1, 0, 8, 9, 7, 4, 5], [7, 2, 5, 0, 3, 8, 6, 9, 1, 4], [8, 7, 3, 4, 5, 0, 1, 2, 6, 9], [2, 1, 7, 4, 5, 3, 0, 6, 9, 8], [7, 1, 2, 8, 3, 0, 4, 5, 6, 9], [5, 7, 3, 2, 6, 8, 9, 0, 4, 1], [7, 2, 0, 3, 8, 9, 5, 6, 1, 4], [8, 2, 4, 5, 7, 0, 1, 9, 6, 3], [6, 0, 3, 7, 2, 9, 8, 5, 4, 1], [0, 8, 3, 7, 2, 1, 5, 6, 9, 4], [4, 5, 1, 0, 7, 2, 8, 3, 6, 9], [3, 7, 1, 4, 0, 5, 2, 8, 9, 6], [3, 7, 2, 0, 5, 4, 1, 6, 8, 9], [4, 7, 5, 3, 0, 1, 8, 2, 9, 6], [0, 3, 8, 2, 7, 1, 6, 9, 4, 5], [4, 0, 7, 3, 1, 5, 6, 8, 2, 9], [4, 5, 9, 0, 1, 3, 6, 2, 8, 7], [5, 7, 6, 2, 4, 1, 3, 0, 9, 8], [5, 8, 7, 4, 3, 0, 2, 9, 1, 6], [5, 0, 3, 6, 1, 9, 8, 2, 7, 4], [7, 1, 4, 5, 0, 2, 3, 6, 8, 9], [1, 0, 7, 2, 5, 8, 3, 6, 4, 9], [6, 7, 8, 0, 2, 9, 3, 4, 5, 1], [3, 0, 9, 2, 1, 6, 8, 4, 7, 5], [8, 2, 3, 9, 0, 6, 7, 1, 5, 4], [4, 5, 9, 7, 6, 2, 8, 0, 1, 3], [7, 1, 4, 5, 3, 6, 8, 2, 0, 9], [4, 7, 2, 5, 8, 9, 3, 6, 1, 0], [2, 8, 3, 9, 7, 1, 6, 0, 5, 4], [0, 7, 8, 2, 9, 1, 3, 6, 5, 4], [2, 5, 4, 1, 0, 7, 8, 3, 6, 9], [5, 7, 2, 8, 6, 9, 3, 1, 0, 4], [2, 8, 7, 5, 0, 3, 9, 6, 4, 1], [4, 5, 1, 7, 2, 0, 8, 6, 3, 9], [7, 2, 4, 8, 3, 1, 5, 6, 9, 0], [0, 4, 7, 1, 5, 2, 6, 8, 9, 3], [7, 2, 8, 4, 3, 1, 0, 5, 6, 9], [7, 2, 5, 4, 8, 0, 3, 9, 1, 6], [4, 5, 7, 1, 6, 3, 2, 9, 8, 0], [7, 5, 1, 3, 0, 9, 8, 2, 6, 4], [7, 4, 6, 2, 5, 8, 0, 9, 1, 3], [7, 5, 4, 1, 0, 2, 8, 3, 6, 9], [1, 7, 2, 8, 3, 5, 4, 0, 9, 6], [7, 0, 4, 2, 8, 3, 1, 6, 5, 9], [0, 3, 4, 1, 5, 8, 9, 6, 7, 2], [8, 3, 1, 7, 5, 2, 4, 0, 9, 6], [0, 2, 3, 7, 9, 8, 6, 1, 4, 5], [7, 5, 2, 3, 0, 6, 8, 1, 9, 4], [1, 7, 3, 5, 0, 8, 6, 2, 4, 9], [5, 4, 6, 2, 7, 9, 8, 0, 3, 1], [1, 4, 7, 5, 8, 2, 3, 9, 6, 0], [4, 1, 6, 0, 5, 3, 7, 2, 8, 9], [7, 5, 3, 0, 1, 2, 6, 8, 9, 4], [1, 6, 7, 2, 5, 0, 9, 3, 8, 4], [2, 0, 8, 1, 7, 5, 3, 6, 9, 4], [5, 7, 2, 0, 1, 6, 3, 8, 9, 4], [4, 7, 5, 0, 2, 1, 8, 3, 9, 6], [7, 0, 4, 5, 2, 1, 6, 8, 9, 3], [4, 5, 0, 3, 7, 6, 8, 2, 9, 1], [0, 7, 2, 8, 5, 4, 1, 9, 3, 6], [7, 1, 8, 2, 9, 6, 5, 0, 3, 4], [7, 1, 2, 4, 3, 5, 8, 0, 9, 6], [4, 5, 7, 2, 0, 6, 8, 1, 3, 9], [4, 5, 7, 2, 0, 3, 9, 1, 6, 8], [7, 1, 0, 3, 6, 4, 9, 8, 2, 5], [7, 3, 6, 2, 9, 8, 0, 1, 5, 4], [7, 4, 1, 5, 0, 8, 2, 6, 9, 3], [4, 5, 0, 1, 7, 2, 3, 8, 9, 6], [1, 6, 8, 2, 9, 7, 3, 0, 4, 5], [0, 7, 2, 3, 8, 5, 6, 1, 4, 9], [0, 7, 2, 8, 9, 6, 4, 5, 1, 3], [2, 7, 8, 6, 4, 1, 9, 0, 3, 5], [0, 3, 2, 8, 7, 5, 1, 9, 6, 4], [7, 2, 4, 3, 5, 0, 8, 9, 6, 1], [0, 1, 2, 6, 7, 8, 3, 4, 5, 9], [5, 7, 2, 8, 9, 6, 0, 4, 1, 3], [7, 1, 8, 5, 0, 6, 3, 2, 9, 4], [7, 4, 5, 2, 1, 0, 3, 8, 6, 9], [4, 7, 5, 0, 2, 3, 1, 8, 9, 6], [7, 3, 1, 8, 2, 9, 0, 6, 5, 4], [1, 2, 0, 3, 4, 8, 9, 7, 5, 6], [7, 1, 5, 4, 3, 0, 2, 8, 6, 9], [3, 0, 7, 1, 5, 8, 6, 4, 9, 2], [6, 2, 1, 7, 4, 3, 9, 8, 0, 5], [7, 0, 2, 3, 8, 9, 6, 4, 5, 1], [6, 7, 5, 8, 1, 2, 9, 3, 0, 4], [4, 7, 5, 2, 3, 0, 8, 9, 1, 6], [7, 2, 6, 0, 8, 9, 5, 1, 3, 4], [7, 2, 1, 5, 0, 4, 6, 8, 3, 9], [7, 4, 3, 5, 6, 0, 1, 2, 8, 9], [4, 5, 7, 3, 0, 1, 2, 8, 6, 9], [8, 3, 0, 7, 2, 4, 5, 6, 9, 1], [5, 4, 7, 3, 0, 8, 9, 2, 1, 6], [7, 4, 5, 2, 8, 1, 6, 0, 3, 9], [7, 2, 8, 0, 3, 5, 4, 6, 1, 9], [1, 2, 3, 7, 5, 4, 0, 8, 9, 6], [7, 4, 5, 2, 1, 0, 6, 8, 3, 9], [4, 7, 5, 3, 0, 2, 8, 1, 9, 6], [4, 1, 3, 7, 2, 8, 0, 5, 6, 9], [7, 0, 2, 8, 1, 3, 5, 6, 9, 4], [2, 4, 0, 3, 7, 9, 8, 1, 6, 5], [1, 3, 7, 2, 5, 4, 8, 0, 9, 
6], [8, 0, 7, 6, 9, 2, 1, 3, 5, 4], [7, 3, 1, 8, 2, 5, 0, 6, 4, 9], [1, 5, 4, 3, 7, 6, 2, 8, 0, 9], [5, 4, 7, 2, 8, 0, 3, 9, 1, 6], [1, 7, 2, 0, 3, 8, 9, 6, 5, 4], [5, 7, 2, 3, 8, 4, 9, 0, 6, 1], [7, 4, 1, 2, 8, 5, 3, 0, 6, 9], [5, 0, 3, 2, 7, 8, 9, 6, 4, 1], [7, 2, 8, 5, 4, 1, 6, 0, 9, 3], [0, 2, 7, 8, 9, 3, 5, 6, 4, 1], [5, 1, 6, 2, 7, 8, 0, 3, 4, 9], [1, 7, 3, 0, 4, 8, 6, 2, 9, 5], [7, 2, 0, 4, 1, 5, 3, 9, 8, 6], [7, 4, 2, 5, 1, 8, 3, 0, 6, 9], [5, 1, 8, 2, 4, 3, 0, 6, 9, 7], [0, 7, 3, 6, 2, 1, 8, 9, 4, 5], [6, 7, 4, 8, 0, 2, 5, 1, 9, 3], [7, 6, 1, 2, 3, 8, 0, 5, 9, 4], [7, 4, 8, 3, 1, 2, 0, 5, 6, 9], [7, 2, 9, 8, 5, 0, 1, 3, 6, 4], [0, 7, 1, 2, 4, 5, 3, 8, 6, 9], [2, 1, 4, 5, 8, 6, 0, 3, 7, 9], [7, 1, 4, 2, 0, 3, 8, 9, 5, 6], [6, 1, 9, 0, 2, 7, 3, 4, 8, 5], [4, 3, 9, 5, 2, 7, 6, 8, 1, 0], [7, 2, 8, 5, 1, 4, 3, 0, 9, 6], [7, 1, 6, 0, 9, 2, 8, 5, 3, 4], [2, 4, 8, 7, 1, 5, 0, 3, 6, 9], [5, 7, 4, 8, 1, 0, 3, 6, 2, 9], [7, 1, 4, 0, 8, 6, 2, 5, 9, 3], [1, 6, 0, 5, 9, 4, 3, 8, 2, 7], [7, 5, 4, 1, 3, 0, 8, 2, 9, 6], [1, 9, 0, 6, 3, 4, 8, 2, 7, 5], [5, 4, 7, 0, 1, 2, 3, 8, 9, 6], [5, 7, 2, 0, 3, 8, 4, 1, 6, 9], [3, 7, 2, 9, 8, 4, 6, 5, 1, 0], [7, 1, 2, 5, 3, 4, 8, 9, 0, 6], [7, 5, 3, 2, 8, 1, 6, 0, 4, 9], [7, 4, 2, 1, 8, 0, 3, 5, 9, 6], [8, 7, 1, 0, 2, 5, 9, 6, 4, 3], [6, 0, 3, 9, 5, 4, 7, 8, 2, 1], [1, 2, 7, 8, 0, 6, 3, 4, 5, 9], [7, 6, 0, 5, 3, 2, 8, 9, 1, 4], [1, 6, 9, 8, 7, 2, 0, 3, 5, 4], [0, 3, 2, 8, 7, 5, 6, 1, 4, 9], [7, 3, 2, 8, 5, 4, 0, 9, 1, 6], [1, 4, 3, 0, 2, 8, 6, 7, 5, 9], [7, 4, 1, 5, 0, 3, 6, 8, 2, 9], [5, 7, 4, 1, 0, 2, 8, 6, 3, 9], [1, 7, 5, 6, 3, 0, 4, 2, 8, 9], [4, 5, 1, 0, 3, 7, 8, 2, 9, 6], [7, 2, 3, 0, 6, 1, 9, 8, 5, 4], [1, 6, 5, 9, 0, 8, 2, 7, 3, 4], [7, 5, 4, 0, 2, 3, 6, 8, 9, 1], [5, 6, 4, 1, 3, 9, 8, 0, 7, 2], [7, 4, 5, 1, 3, 2, 8, 0, 9, 6], [7, 3, 5, 8, 2, 6, 0, 9, 1, 4], [7, 4, 5, 0, 1, 2, 9, 8, 3, 6], [5, 1, 9, 4, 7, 8, 6, 0, 2, 3], [6, 7, 2, 8, 9, 3, 1, 4, 5, 0], [3, 5, 4, 2, 0, 8, 7, 1, 9, 6], [4, 7, 1, 5, 3, 2, 8, 6, 0, 9], [7, 4, 2, 5, 3, 1, 6, 8, 9, 0], [6, 1, 7, 4, 5, 8, 2, 9, 3, 0], [0, 3, 7, 2, 8, 9, 6, 4, 5, 1], [7, 2, 5, 4, 3, 1, 8, 6, 0, 9], [7, 4, 0, 1, 5, 3, 8, 2, 6, 9], [1, 7, 2, 6, 0, 8, 3, 5, 4, 9], [5, 7, 2, 8, 4, 0, 3, 6, 9, 1], [9, 2, 0, 5, 7, 8, 3, 1, 4, 6], [7, 0, 1, 8, 4, 6, 3, 5, 2, 9], [7, 4, 1, 0, 3, 2, 8, 5, 6, 9], [7, 1, 5, 8, 6, 2, 0, 3, 4, 9], [4, 1, 5, 3, 0, 2, 8, 7, 9, 6], [4, 7, 0, 1, 3, 2, 9, 8, 6, 5], [7, 6, 0, 8, 9, 2, 3, 1, 4, 5], [0, 7, 8, 3, 4, 5, 9, 2, 1, 6], [4, 7, 8, 5, 1, 2, 0, 6, 9, 3], [1, 0, 6, 5, 3, 7, 2, 8, 9, 4], [5, 8, 9, 3, 0, 2, 6, 1, 7, 4], [7, 4, 5, 2, 0, 1, 6, 8, 3, 9], [4, 5, 1, 2, 3, 0, 6, 7, 8, 9], [3, 7, 9, 8, 1, 6, 2, 0, 4, 5], [5, 8, 7, 2, 0, 3, 9, 4, 6, 1], [7, 5, 0, 4, 2, 3, 8, 6, 1, 9], [4, 7, 2, 1, 8, 5, 9, 3, 0, 6], [3, 0, 8, 2, 1, 7, 9, 6, 5, 4], [6, 5, 4, 7, 0, 2, 8, 1, 3, 9], [7, 3, 0, 2, 1, 5, 4, 6, 8, 9], [7, 4, 2, 5, 3, 8, 0, 1, 9, 6], [7, 3, 1, 0, 4, 2, 8, 9, 5, 6], [8, 1, 0, 2, 4, 6, 9, 5, 3, 7], [6, 5, 1, 8, 7, 2, 3, 0, 9, 4], [7, 4, 1, 5, 3, 6, 0, 2, 8, 9], [7, 2, 1, 9, 8, 6, 5, 3, 4, 0], [0, 4, 6, 9, 1, 7, 2, 5, 8, 3], [4, 7, 1, 5, 2, 3, 6, 0, 8, 9], [7, 2, 1, 3, 0, 8, 5, 4, 6, 9], [4, 5, 2, 1, 8, 0, 3, 7, 9, 6], [7, 3, 5, 0, 2, 4, 8, 6, 9, 1], [4, 5, 7, 0, 2, 8, 1, 3, 9, 6], [7, 2, 3, 0, 6, 9, 5, 1, 4, 8], [7, 2, 1, 3, 8, 5, 4, 6, 0, 9], [4, 7, 0, 8, 2, 6, 5, 3, 9, 1], [3, 7, 8, 9, 0, 2, 1, 4, 5, 6], [7, 8, 3, 4, 5, 1, 9, 0, 2, 6], [7, 2, 5, 0, 8, 1, 4, 9, 3, 6], [2, 7, 0, 4, 8, 9, 5, 1, 3, 6], [7, 2, 5, 1, 3, 8, 0, 6, 9, 4], [5, 2, 7, 0, 8, 1, 4, 3, 6, 9], [1, 0, 9, 8, 6, 5, 7, 2, 3, 
4], [7, 1, 2, 0, 4, 3, 6, 5, 9, 8], [6, 9, 8, 1, 0, 7, 5, 2, 3, 4], [4, 0, 7, 5, 3, 2, 6, 9, 8, 1], [7, 1, 2, 5, 6, 4, 9, 3, 8, 0], [4, 7, 2, 3, 8, 6, 1, 5, 9, 0], [1, 3, 8, 2, 6, 0, 7, 9, 4, 5], [0, 3, 2, 7, 1, 8, 6, 9, 5, 4], [0, 5, 9, 1, 6, 3, 7, 2, 8, 4], [1, 0, 7, 6, 2, 8, 9, 3, 5, 4], [6, 1, 7, 2, 0, 3, 4, 5, 8, 9], [7, 6, 1, 5, 3, 2, 4, 0, 8, 9], [1, 7, 2, 8, 0, 6, 3, 5, 4, 9], [6, 1, 7, 4, 5, 8, 2, 0, 3, 9], [1, 4, 0, 6, 3, 9, 8, 5, 2, 7], [4, 7, 5, 1, 2, 8, 0, 6, 9, 3], [1, 7, 2, 0, 6, 4, 3, 8, 5, 9], [6, 3, 7, 2, 9, 1, 8, 0, 5, 4], [9, 8, 7, 6, 3, 5, 2, 0, 4, 1], [0, 4, 2, 7, 6, 5, 3, 8, 1, 9], [2, 1, 8, 5, 7, 4, 9, 0, 6, 3], [5, 7, 3, 2, 0, 8, 4, 6, 1, 9], [7, 5, 4, 3, 1, 8, 0, 2, 9, 6], [7, 2, 1, 8, 0, 3, 4, 6, 9, 5], [7, 4, 5, 2, 3, 8, 0, 6, 9, 1], [7, 6, 8, 3, 2, 1, 9, 0, 5, 4], [4, 0, 7, 3, 1, 5, 8, 6, 9, 2], [5, 0, 7, 3, 2, 8, 9, 1, 6, 4], [7, 4, 0, 1, 3, 5, 2, 6, 8, 9], [0, 5, 6, 7, 2, 4, 3, 1, 8, 9], [1, 7, 4, 0, 2, 6, 8, 5, 9, 3], [1, 8, 7, 2, 3, 6, 5, 9, 0, 4], [5, 4, 1, 7, 3, 0, 6, 8, 9, 2], [7, 1, 2, 4, 8, 0, 5, 6, 9, 3], [2, 6, 4, 5, 7, 0, 1, 8, 3, 9], [7, 2, 0, 3, 8, 1, 6, 9, 5, 4], [0, 2, 3, 8, 9, 6, 1, 7, 5, 4], [6, 1, 0, 2, 8, 9, 7, 3, 5, 4], [7, 4, 5, 1, 0, 6, 8, 3, 2, 9], [3, 2, 0, 7, 8, 4, 5, 9, 1, 6], [7, 4, 1, 0, 2, 8, 5, 3, 9, 6], [7, 3, 8, 1, 2, 0, 5, 4, 6, 9], [0, 7, 5, 2, 3, 1, 9, 4, 8, 6], [7, 5, 4, 2, 0, 3, 8, 9, 1, 6], [7, 2, 6, 1, 5, 0, 8, 3, 9, 4], [5, 6, 9, 0, 3, 1, 8, 7, 4, 2], [5, 7, 4, 1, 6, 8, 2, 0, 3, 9], [1, 5, 6, 3, 7, 8, 0, 9, 4, 2], [3, 0, 8, 4, 2, 5, 7, 1, 9, 6], [4, 0, 1, 5, 7, 3, 6, 2, 8, 9], [5, 4, 1, 6, 7, 2, 0, 8, 3, 9], [7, 4, 5, 2, 0, 1, 3, 6, 8, 9], [2, 7, 8, 6, 3, 5, 4, 9, 1, 0], [2, 8, 0, 5, 3, 4, 6, 9, 7, 1], [2, 0, 3, 1, 7, 6, 8, 5, 4, 9], [0, 2, 7, 9, 1, 3, 5, 4, 6, 8], [3, 8, 2, 7, 4, 0, 9, 1, 5, 6], [2, 5, 7, 1, 3, 8, 4, 6, 9, 0], [2, 7, 0, 3, 8, 6, 9, 1, 4, 5], [4, 1, 7, 5, 2, 8, 0, 3, 9, 6], [7, 0, 2, 4, 9, 6, 5, 8, 3, 1], [7, 2, 8, 5, 0, 3, 6, 4, 9, 1], [0, 3, 6, 9, 7, 2, 4, 8, 5, 1], [7, 5, 4, 8, 1, 2, 6, 0, 3, 9], [7, 4, 1, 5, 0, 3, 2, 6, 8, 9], [4, 5, 1, 7, 3, 2, 0, 6, 8, 9], [9, 7, 5, 0, 2, 8, 3, 6, 4, 1], [7, 1, 4, 8, 5, 2, 3, 0, 6, 9], [7, 1, 6, 5, 4, 2, 0, 8, 9, 3], [4, 3, 0, 2, 1, 8, 9, 6, 7, 5], [8, 1, 3, 7, 0, 2, 9, 5, 6, 4], [5, 7, 2, 0, 9, 8, 1, 3, 6, 4], [6, 5, 0, 7, 8, 2, 3, 9, 1, 4], [0, 3, 7, 5, 6, 1, 8, 2, 4, 9], [7, 0, 8, 6, 5, 3, 2, 1, 9, 4], [7, 3, 2, 4, 0, 1, 8, 9, 5, 6], [7, 0, 4, 2, 8, 5, 3, 1, 9, 6], [1, 4, 7, 2, 3, 5, 8, 6, 0, 9], [4, 5, 0, 3, 8, 1, 9, 7, 2, 6], [6, 5, 7, 2, 8, 0, 4, 1, 3, 9], [5, 4, 3, 0, 6, 9, 1, 8, 7, 2], [1, 8, 2, 3, 0, 7, 6, 9, 5, 4], [0, 7, 2, 8, 4, 6, 1, 3, 9, 5], [3, 0, 8, 2, 7, 9, 6, 1, 5, 4], [1, 4, 0, 2, 5, 7, 8, 9, 3, 6], [5, 7, 8, 2, 0, 4, 3, 1, 6, 9], [1, 0, 5, 7, 2, 3, 4, 6, 8, 9], [7, 2, 3, 0, 1, 5, 8, 4, 6, 9], [1, 5, 9, 4, 8, 7, 2, 3, 0, 6], [7, 4, 5, 3, 2, 1, 8, 0, 9, 6], [0, 7, 2, 8, 5, 4, 9, 3, 1, 6], [1, 7, 2, 6, 0, 4, 8, 3, 5, 9], [1, 5, 0, 4, 6, 7, 2, 3, 8, 9], [4, 7, 5, 2, 0, 3, 1, 6, 8, 9], [0, 1, 3, 9, 6, 8, 7, 2, 5, 4], [7, 1, 0, 6, 2, 3, 8, 9, 5, 4], [4, 5, 1, 0, 3, 6, 9, 2, 8, 7], [1, 4, 0, 3, 5, 6, 7, 8, 2, 9], [3, 0, 5, 8, 9, 6, 1, 7, 2, 4], [7, 2, 3, 0, 8, 1, 5, 9, 4, 6], [6, 2, 7, 1, 0, 4, 8, 9, 5, 3], [7, 2, 5, 4, 8, 0, 6, 9, 3, 1], [6, 1, 3, 9, 5, 0, 7, 8, 2, 4], [7, 5, 4, 1, 0, 2, 8, 6, 3, 9], [5, 7, 4, 0, 1, 8, 2, 9, 6, 3], [1, 0, 5, 8, 2, 7, 6, 3, 4, 9], [1, 0, 6, 9, 8, 4, 3, 7, 2, 5], [5, 4, 7, 2, 9, 8, 6, 1, 3, 0], [4, 0, 1, 8, 2, 6, 9, 7, 3, 5], [2, 7, 1, 8, 5, 0, 6, 3, 9, 4], [0, 5, 4, 8, 6, 3, 9, 7, 2, 1], [6, 1, 9, 0, 5, 4, 7, 2, 8, 
3], [1, 3, 7, 2, 6, 9, 0, 8, 5, 4], [4, 3, 7, 1, 6, 5, 2, 0, 8, 9], [7, 2, 3, 8, 9, 5, 6, 1, 0, 4], [7, 4, 2, 0, 3, 5, 1, 8, 6, 9], [3, 2, 7, 1, 5, 9, 8, 0, 6, 4], [6, 7, 1, 2, 8, 9, 0, 5, 4, 3], [2, 7, 5, 1, 0, 8, 3, 9, 6, 4], [5, 7, 8, 1, 3, 2, 4, 0, 9, 6], [1, 5, 6, 7, 0, 8, 2, 3, 4, 9], [4, 1, 7, 5, 0, 3, 2, 8, 9, 6], [5, 3, 0, 1, 6, 9, 7, 8, 4, 2], [7, 2, 5, 4, 8, 9, 0, 3, 1, 6], [5, 2, 4, 6, 1, 8, 3, 9, 0, 7], [4, 7, 5, 0, 3, 2, 1, 8, 6, 9], [7, 4, 5, 8, 2, 1, 0, 3, 9, 6], [1, 7, 8, 2, 4, 5, 0, 9, 6, 3], [7, 5, 6, 4, 2, 8, 0, 1, 9, 3], [4, 1, 7, 5, 2, 6, 0, 3, 8, 9], [2, 7, 0, 3, 8, 1, 9, 5, 6, 4], [7, 4, 5, 1, 0, 2, 3, 8, 9, 6], [4, 7, 5, 1, 0, 3, 8, 2, 9, 6], [8, 5, 3, 2, 6, 7, 1, 0, 9, 4], [7, 1, 8, 2, 4, 5, 0, 6, 9, 3], [7, 1, 2, 8, 4, 0, 5, 6, 9, 3], [6, 5, 1, 7, 2, 8, 3, 0, 4, 9], [1, 4, 7, 5, 8, 3, 2, 0, 9, 6], [0, 3, 7, 6, 4, 5, 2, 9, 1, 8], [7, 4, 2, 0, 3, 5, 6, 8, 9, 1], [4, 1, 7, 3, 8, 5, 0, 6, 2, 9], [1, 7, 4, 2, 5, 0, 8, 6, 3, 9], [7, 2, 4, 8, 1, 5, 6, 0, 3, 9], [1, 7, 3, 2, 0, 4, 6, 8, 9, 5], [5, 8, 2, 0, 6, 7, 9, 3, 4, 1], [7, 2, 5, 8, 0, 3, 1, 9, 6, 4], [7, 4, 0, 3, 9, 8, 5, 6, 2, 1], [5, 7, 0, 3, 4, 9, 8, 1, 6, 2], [4, 0, 1, 8, 2, 7, 9, 3, 6, 5], [7, 3, 2, 6, 8, 1, 0, 9, 4, 5], [4, 0, 7, 8, 6, 1, 2, 5, 9, 3], [7, 2, 1, 8, 4, 0, 9, 5, 3, 6], [0, 5, 4, 1, 2, 8, 7, 6, 9, 3], [7, 4, 2, 0, 8, 5, 3, 9, 1, 6], [8, 2, 3, 1, 6, 0, 9, 5, 7, 4], [5, 7, 1, 0, 2, 8, 4, 9, 6, 3], [7, 2, 4, 5, 6, 0, 1, 3, 8, 9], [1, 4, 7, 5, 2, 0, 3, 9, 8, 6], [7, 2, 4, 8, 5, 0, 9, 1, 3, 6], [7, 2, 5, 0, 1, 4, 8, 9, 3, 6], [7, 2, 5, 0, 4, 8, 3, 6, 9, 1], [7, 4, 5, 1, 2, 8, 3, 0, 6, 9], [7, 2, 4, 5, 8, 9, 1, 6, 3, 0], [2, 5, 7, 4, 3, 0, 1, 6, 8, 9], [7, 4, 5, 1, 0, 2, 6, 8, 3, 9], [4, 7, 5, 1, 3, 0, 2, 8, 9, 6], [4, 5, 9, 6, 0, 3, 1, 7, 8, 2], [7, 0, 5, 4, 3, 2, 8, 1, 6, 9], [7, 5, 6, 1, 8, 2, 9, 0, 3, 4], [0, 6, 3, 9, 1, 7, 2, 8, 5, 4], [0, 5, 7, 2, 8, 3, 1, 9, 4, 6], [1, 4, 6, 5, 0, 7, 2, 3, 8, 9], [5, 3, 9, 0, 6, 2, 8, 7, 1, 4], [5, 1, 4, 7, 0, 9, 3, 2, 8, 6], [0, 7, 2, 4, 3, 6, 1, 8, 5, 9], [2, 5, 1, 7, 8, 0, 6, 3, 9, 4], [7, 3, 4, 5, 0, 1, 2, 8, 9, 6], [4, 7, 5, 0, 2, 8, 1, 3, 9, 6], [4, 1, 3, 7, 9, 2, 8, 0, 6, 5], [7, 1, 4, 8, 6, 5, 2, 9, 0, 3], [0, 7, 2, 8, 1, 3, 6, 9, 5, 4], [8, 1, 2, 9, 7, 0, 3, 6, 5, 4], [0, 7, 2, 4, 5, 8, 1, 9, 6, 3], [1, 4, 8, 2, 7, 3, 6, 5, 0, 9], [5, 4, 6, 7, 2, 8, 3, 0, 1, 9], [3, 7, 4, 6, 2, 5, 0, 8, 1, 9], [7, 1, 3, 0, 4, 8, 2, 5, 6, 9], [7, 5, 1, 0, 2, 8, 4, 9, 3, 6], [7, 0, 2, 5, 4, 3, 8, 1, 9, 6], [5, 4, 7, 0, 2, 3, 8, 6, 9, 1], [3, 0, 5, 1, 4, 7, 2, 8, 9, 6], [7, 4, 2, 8, 5, 0, 3, 1, 6, 9], [7, 1, 0, 4, 5, 8, 9, 2, 6, 3], [6, 7, 0, 1, 5, 2, 3, 4, 8, 9], [7, 5, 8, 2, 1, 0, 9, 3, 6, 4], [2, 9, 0, 6, 7, 3, 8, 1, 5, 4], [9, 2, 0, 3, 8, 4, 5, 1, 7, 6], [3, 7, 0, 2, 1, 6, 8, 9, 5, 4], [2, 0, 8, 6, 7, 5, 9, 3, 1, 4], [5, 7, 2, 4, 6, 0, 8, 9, 3, 1], [4, 7, 2, 1, 0, 3, 5, 8, 6, 9], [4, 5, 7, 8, 2, 1, 0, 3, 9, 6], [7, 1, 2, 8, 5, 0, 3, 9, 6, 4], [2, 7, 8, 9, 1, 6, 5, 0, 4, 3], [1, 7, 2, 8, 0, 5, 3, 6, 4, 9], [5, 7, 3, 0, 2, 6, 8, 1, 9, 4], [7, 5, 2, 8, 0, 6, 4, 3, 1, 9], [7, 6, 4, 1, 0, 2, 5, 3, 8, 9], [1, 5, 8, 3, 4, 2, 6, 0, 7, 9], [7, 0, 3, 2, 8, 9, 1, 6, 5, 4], [6, 0, 3, 9, 1, 7, 8, 2, 5, 4], [7, 3, 6, 0, 2, 4, 1, 5, 8, 9], [7, 0, 4, 3, 5, 2, 1, 8, 9, 6], [7, 3, 0, 6, 8, 2, 9, 1, 4, 5], [1, 7, 5, 3, 8, 2, 6, 0, 9, 4], [7, 5, 0, 2, 6, 8, 4, 9, 3, 1], [7, 4, 1, 6, 0, 5, 2, 8, 9, 3], [7, 1, 6, 0, 2, 4, 5, 8, 3, 9], [4, 0, 7, 2, 8, 1, 6, 9, 3, 5], [1, 7, 2, 8, 0, 5, 6, 9, 3, 4], [0, 1, 6, 9, 3, 7, 4, 2, 8, 5], [7, 1, 5, 3, 0, 2, 8, 6, 4, 9], [7, 2, 6, 4, 8, 3, 5, 1, 0, 
9], [0, 1, 7, 6, 2, 9, 3, 8, 5, 4], [2, 7, 4, 5, 1, 8, 9, 6, 3, 0], [2, 8, 0, 3, 1, 9, 6, 5, 4, 7], [7, 6, 1, 5, 8, 4, 0, 2, 9, 3], [7, 4, 5, 1, 2, 8, 6, 0, 3, 9], [4, 5, 1, 7, 9, 6, 0, 8, 2, 3], [2, 8, 4, 0, 5, 3, 9, 6, 1, 7], [7, 4, 5, 0, 3, 1, 2, 8, 6, 9], [2, 7, 4, 5, 8, 0, 6, 1, 9, 3], [2, 1, 7, 0, 3, 6, 9, 8, 4, 5], [8, 5, 2, 7, 6, 0, 3, 9, 1, 4], [1, 2, 8, 3, 0, 7, 6, 9, 5, 4], [7, 2, 3, 1, 0, 8, 9, 6, 4, 5], [9, 0, 2, 6, 4, 7, 8, 5, 1, 3], [7, 6, 3, 0, 1, 8, 2, 9, 5, 4], [6, 0, 3, 7, 2, 8, 9, 1, 5, 4], [7, 4, 0, 2, 5, 3, 1, 8, 9, 6], [5, 7, 1, 0, 3, 4, 6, 8, 2, 9], [7, 4, 8, 1, 2, 3, 5, 6, 0, 9], [2, 7, 4, 5, 0, 1, 3, 8, 6, 9], [2, 8, 7, 6, 1, 5, 4, 0, 3, 9], [4, 1, 5, 0, 3, 7, 8, 2, 6, 9], [5, 3, 1, 7, 4, 2, 8, 0, 6, 9], [5, 6, 0, 1, 3, 4, 7, 2, 8, 9], [4, 7, 2, 1, 3, 8, 0, 5, 6, 9], [2, 7, 8, 0, 6, 9, 3, 4, 5, 1], [7, 3, 2, 1, 8, 0, 9, 6, 4, 5], [4, 7, 0, 5, 1, 2, 8, 3, 9, 6], [5, 7, 6, 2, 8, 3, 1, 9, 0, 4], [7, 4, 2, 8, 1, 0, 5, 3, 9, 6], [1, 3, 8, 9, 6, 7, 2, 0, 5, 4], [7, 8, 5, 2, 1, 0, 3, 9, 6, 4], [0, 2, 8, 3, 1, 6, 7, 9, 4, 5], [3, 7, 0, 6, 1, 5, 2, 8, 4, 9], [4, 7, 2, 5, 1, 3, 8, 0, 9, 6], [1, 4, 0, 7, 6, 2, 3, 9, 8, 5], [7, 5, 4, 2, 1, 8, 0, 3, 6, 9], [4, 7, 5, 2, 3, 8, 1, 9, 0, 6], [7, 2, 8, 3, 0, 9, 6, 1, 4, 5], [1, 0, 3, 6, 4, 7, 8, 2, 5, 9], [7, 0, 1, 3, 2, 8, 9, 6, 5, 4], [4, 7, 2, 0, 5, 3, 8, 9, 6, 1], [7, 2, 3, 4, 5, 0, 1, 6, 8, 9], [6, 0, 7, 8, 4, 2, 5, 3, 1, 9], [9, 1, 5, 0, 7, 2, 8, 4, 3, 6], [7, 4, 0, 2, 5, 3, 8, 1, 6, 9], [7, 5, 2, 4, 1, 8, 6, 9, 0, 3], [2, 7, 5, 8, 6, 9, 3, 0, 1, 4], [2, 0, 8, 1, 3, 7, 6, 4, 9, 5], [5, 0, 9, 2, 7, 3, 8, 6, 1, 4], [7, 5, 2, 8, 1, 4, 6, 3, 9, 0], [7, 8, 4, 1, 5, 2, 0, 3, 6, 9], [7, 4, 0, 5, 1, 3, 2, 8, 6, 9], [5, 1, 2, 7, 3, 8, 6, 0, 4, 9], [6, 5, 0, 1, 9, 3, 8, 2, 7, 4], [2, 6, 8, 9, 7, 3, 0, 5, 4, 1], [1, 7, 2, 0, 5, 8, 9, 6, 3, 4], [3, 7, 5, 2, 1, 0, 8, 9, 4, 6], [1, 2, 3, 8, 5, 0, 6, 9, 7, 4], [0, 2, 7, 4, 1, 8, 9, 5, 3, 6], [7, 8, 6, 5, 2, 4, 0, 1, 3, 9], [1, 6, 5, 0, 3, 8, 9, 7, 2, 4], [7, 4, 5, 3, 0, 1, 2, 8, 9, 6], [9, 3, 4, 8, 2, 6, 0, 5, 1, 7], [6, 1, 2, 7, 0, 3, 8, 5, 9, 4], [4, 1, 7, 0, 5, 2, 8, 6, 3, 9], [8, 7, 3, 2, 1, 5, 4, 0, 9, 6], [5, 7, 2, 1, 8, 0, 6, 3, 9, 4], [7, 4, 5, 2, 8, 0, 1, 6, 9, 3], [5, 7, 1, 3, 8, 2, 9, 6, 0, 4], [0, 5, 8, 7, 6, 1, 9, 2, 4, 3], [1, 6, 7, 0, 5, 2, 8, 9, 3, 4], [2, 6, 0, 1, 8, 9, 7, 5, 3, 4], [4, 7, 5, 0, 1, 2, 8, 9, 6, 3], [4, 5, 7, 2, 1, 8, 3, 9, 0, 6], [6, 8, 7, 4, 5, 2, 0, 3, 9, 1], [4, 7, 2, 1, 8, 6, 0, 3, 5, 9], [7, 1, 4, 8, 2, 0, 3, 5, 6, 9], [1, 6, 7, 8, 4, 0, 5, 2, 3, 9], [8, 1, 5, 4, 2, 7, 0, 6, 9, 3], [8, 5, 6, 2, 3, 0, 7, 4, 9, 1], [5, 1, 4, 3, 0, 9, 6, 8, 2, 7], [5, 2, 7, 0, 1, 9, 3, 6, 8, 4], [4, 7, 5, 0, 3, 2, 8, 9, 6, 1], [7, 5, 0, 4, 3, 1, 2, 8, 9, 6], [7, 3, 2, 0, 1, 8, 6, 9, 4, 5], [0, 7, 4, 6, 3, 1, 5, 8, 9, 2], [5, 1, 3, 7, 8, 2, 0, 9, 4, 6], [4, 7, 3, 0, 2, 5, 8, 9, 6, 1], [0, 7, 2, 5, 4, 3, 6, 8, 9, 1], [3, 0, 9, 2, 5, 4, 1, 7, 6, 8], [1, 0, 7, 4, 3, 8, 5, 2, 6, 9], [3, 0, 2, 5, 8, 7, 9, 1, 6, 4], [0, 4, 7, 2, 5, 9, 6, 8, 3, 1], [2, 6, 8, 1, 5, 0, 9, 7, 4, 3], [0, 5, 1, 4, 2, 7, 6, 3, 8, 9], [4, 5, 7, 1, 0, 2, 8, 3, 9, 6], [0, 7, 1, 4, 5, 2, 6, 8, 9, 3], [5, 7, 2, 8, 3, 1, 0, 6, 9, 4], [1, 7, 6, 9, 8, 0, 2, 3, 5, 4], [4, 7, 0, 2, 3, 8, 6, 9, 1, 5], [7, 4, 1, 5, 3, 0, 8, 9, 6, 2], [7, 2, 6, 8, 0, 9, 3, 1, 4, 5], [4, 0, 7, 3, 2, 1, 6, 5, 9, 8], [4, 1, 7, 0, 6, 5, 2, 8, 3, 9], [5, 1, 6, 7, 0, 3, 2, 9, 8, 4], [6, 2, 8, 0, 1, 9, 3, 7, 5, 4], [7, 4, 5, 2, 8, 1, 6, 0, 3, 9], [5, 7, 3, 6, 0, 4, 2, 8, 9, 1], [0, 3, 2, 1, 7, 5, 6, 9, 8, 4], [4, 7, 8, 1, 2, 5, 0, 9, 6, 
3], [1, 8, 5, 2, 3, 0, 4, 7, 6, 9], [6, 4, 0, 1, 3, 5, 7, 9, 2, 8], [7, 5, 1, 4, 2, 8, 0, 3, 6, 9], [2, 0, 8, 7, 1, 3, 6, 9, 5, 4], [7, 2, 0, 8, 1, 3, 5, 6, 9, 4], [7, 2, 5, 0, 4, 3, 8, 1, 6, 9], [5, 4, 7, 2, 0, 1, 9, 8, 6, 3], [1, 4, 5, 7, 2, 0, 8, 6, 9, 3], [5, 4, 7, 2, 1, 0, 8, 9, 3, 6], [5, 4, 2, 7, 1, 0, 3, 8, 9, 6], [7, 0, 1, 6, 4, 2, 5, 3, 8, 9], [1, 7, 2, 8, 0, 5, 4, 6, 3, 9], [2, 0, 7, 6, 1, 3, 8, 9, 4, 5], [1, 7, 5, 2, 4, 6, 9, 8, 0, 3], [4, 5, 8, 7, 2, 1, 9, 6, 0, 3], [7, 1, 4, 5, 2, 0, 8, 6, 3, 9], [4, 0, 7, 1, 3, 5, 8, 2, 9, 6], [7, 2, 4, 8, 5, 0, 1, 6, 3, 9], [7, 4, 5, 8, 2, 3, 0, 9, 6, 1], [0, 9, 6, 7, 3, 1, 2, 8, 4, 5], [7, 1, 4, 2, 5, 8, 3, 6, 0, 9], [7, 2, 5, 8, 0, 3, 1, 4, 6, 9], [4, 1, 3, 2, 8, 7, 6, 0, 9, 5], [7, 2, 8, 0, 4, 1, 3, 5, 6, 9], [4, 0, 7, 2, 6, 9, 3, 8, 1, 5], [7, 4, 5, 6, 3, 1, 0, 2, 9, 8], [7, 2, 1, 8, 6, 3, 0, 9, 5, 4], [1, 2, 3, 6, 0, 4, 5, 7, 8, 9], [0, 3, 5, 1, 8, 9, 2, 7, 4, 6], [7, 2, 4, 5, 3, 1, 0, 8, 9, 6], [7, 9, 0, 1, 2, 8, 6, 5, 3, 4], [7, 8, 4, 5, 3, 0, 2, 9, 6, 1], [4, 5, 2, 1, 0, 7, 8, 3, 9, 6], [7, 1, 6, 4, 5, 2, 8, 0, 3, 9], [5, 7, 4, 0, 2, 8, 1, 9, 6, 3], [3, 5, 2, 4, 7, 1, 8, 9, 0, 6], [7, 2, 8, 5, 3, 4, 6, 1, 9, 0], [7, 0, 4, 1, 2, 9, 5, 8, 3, 6], [0, 3, 9, 7, 1, 6, 2, 8, 4, 5], [5, 1, 3, 6, 8, 0, 7, 2, 4, 9], [3, 0, 6, 2, 7, 8, 9, 1, 5, 4], [7, 4, 1, 0, 3, 8, 5, 6, 9, 2], [1, 7, 8, 2, 0, 3, 6, 9, 5, 4], [7, 5, 2, 8, 0, 3, 9, 6, 1, 4], [7, 2, 1, 0, 4, 5, 3, 6, 8, 9], [7, 2, 0, 1, 8, 3, 6, 9, 5, 4], [7, 5, 4, 0, 1, 2, 8, 6, 9, 3], [2, 8, 3, 0, 5, 1, 6, 7, 9, 4], [6, 7, 0, 9, 2, 1, 8, 3, 5, 4], [7, 5, 1, 4, 0, 3, 8, 2, 9, 6], [0, 4, 5, 7, 8, 2, 6, 3, 1, 9], [0, 5, 3, 2, 6, 7, 1, 9, 8, 4], [6, 1, 9, 7, 4, 8, 3, 2, 0, 5], [0, 3, 6, 1, 7, 2, 8, 9, 5, 4], [3, 7, 4, 5, 6, 0, 1, 2, 8, 9], [7, 1, 3, 8, 6, 0, 5, 2, 9, 4], [7, 2, 4, 5, 3, 6, 1, 0, 8, 9], [7, 2, 3, 0, 8, 1, 5, 9, 6, 4], [0, 5, 3, 4, 7, 2, 6, 9, 8, 1]]
| 53,342.333333
| 80,044
| 0.312566
| 50,004
| 160,027
| 1.00026
| 0.00026
| 0.050623
| 0.018234
| 0.006158
| 0.985865
| 0.790631
| 0.432013
| 0.188236
| 0.078133
| 0.039227
| 0
| 0.45445
| 0.312472
| 160,027
| 2
| 80,045
| 80,013.5
| 0.000155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 128f4f3856b8aaadcedf5d21ca56d8b5d3b0b03d
| 142
| py
| Python
| tests/test_sample.py
| apegadoboureghida/redis-task-queue
| fac68049abfbc8294b9e866ae28625472b9be002
| ["BSD-3-Clause"]
| null
| null
| null
| tests/test_sample.py
| apegadoboureghida/redis-task-queue
| fac68049abfbc8294b9e866ae28625472b9be002
| ["BSD-3-Clause"]
| null
| null
| null
| tests/test_sample.py
| apegadoboureghida/redis-task-queue
| fac68049abfbc8294b9e866ae28625472b9be002
| ["BSD-3-Clause"]
| null
| null
| null
# Sample test passing with nose and pytest
from redis_task_queue import ModelQueue  # unused below; the import itself checks the package loads
def test_pass():
    assert True, "dummy sample test"
| 23.666667
| 42
| 0.753521
| 21
| 142
| 4.952381
| 0.857143
| 0.192308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 142
| 5
| 43
| 28.4
| 0.912281
| 0.28169
| 0
| 0
| 0
| 0
| 0.17
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 7
| 12d7a51e3603f8d7fe2c629df3c4f2aa69d9426c
| 84,990
| py
| Python
| biserici_inlemnite/nomenclatoare/migrations/0001_initial.py
| ck-tm/biserici-inlemnite
| c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
| ["MIT"]
| null
| null
| null
| biserici_inlemnite/nomenclatoare/migrations/0001_initial.py
| ck-tm/biserici-inlemnite
| c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
| ["MIT"]
| null
| null
| null
| biserici_inlemnite/nomenclatoare/migrations/0001_initial.py
| ck-tm/biserici-inlemnite
| c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
| ["MIT"]
| null
| null
| null
# Generated by Django 3.1.13 on 2021-07-29 13:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
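# Initial schema for the `nomenclatoare` (nomenclature) app: a set of simple
# lookup tables plus django-simple-history audit copies of each one.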
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('biserici', '0001_initial'),
]
operations = [
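# Plain lookup models first: nearly all consist of just an auto `id`
# plus a short `nume` (name) label and a Romanian verbose_name_plural.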
migrations.CreateModel(
name='AmplasamentBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Amplasamente Biserică',
},
),
migrations.CreateModel(
name='AmplasareTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Amplasări Turn',
},
),
migrations.CreateModel(
name='CultBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Culturi Biserică',
},
),
migrations.CreateModel(
name='DecorTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Decoruri Turn',
},
),
migrations.CreateModel(
name='DimensiuneTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Dimensiuni Turn',
},
),
migrations.CreateModel(
name='ElementAnsambluConstruit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Elemente Ansamblu Construit',
},
),
migrations.CreateModel(
name='ElementeBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Elemente Biserica',
},
),
migrations.CreateModel(
name='ElementImportant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Elemente Importante',
},
),
migrations.CreateModel(
name='EsentaLemnoasa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Esenta Lemnoasa',
},
),
migrations.CreateModel(
name='Eveniment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Evenimente',
},
),
migrations.CreateModel(
name='Finisaj',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Finisaj',
},
),
migrations.CreateModel(
name='FinisajExterior',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Finisaj Exterior',
},
),
migrations.CreateModel(
name='FinisajIconostas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Finisaje Iconostas',
},
),
migrations.CreateModel(
name='FunctiuneBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Funcțiuni Biserică',
},
),
migrations.CreateModel(
name='GalerieTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Galerii Turn',
},
),
migrations.CreateModel(
name='Judet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=50)),
('cod', models.CharField(max_length=2)),
],
options={
'verbose_name_plural': 'Județe',
},
),
migrations.CreateModel(
name='Localitate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=50)),
('judet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.judet')),
],
options={
'verbose_name_plural': 'Localități',
},
),
migrations.CreateModel(
name='LocalizarePictura',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Localizări Pictură',
},
),
migrations.CreateModel(
name='Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Materiale',
},
),
migrations.CreateModel(
name='MaterialFinisajPardoseli',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Material Finisaj Pardoseli',
},
),
migrations.CreateModel(
name='MaterialFinisajPeretiInteriori',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Material Finisaj Pereti Interiori',
},
),
migrations.CreateModel(
name='PeisagisticaSit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Peisagistică Sit',
},
),
migrations.CreateModel(
name='Persoana',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Persoane',
},
),
migrations.CreateModel(
name='Planimetrie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Planimetrii',
},
),
migrations.CreateModel(
name='PlanTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Planuri Turn',
},
),
migrations.CreateModel(
name='ProprietateBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Proprietăți Biserică',
},
),
migrations.CreateModel(
name='RelatieCimitir',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Relație Cimitir',
},
),
migrations.CreateModel(
name='Secol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=6)),
],
options={
'verbose_name_plural': 'Secole',
},
),
migrations.CreateModel(
name='SingularitateBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Singularități Biserică',
},
),
migrations.CreateModel(
name='StatutBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Statuturi Biserică',
},
),
migrations.CreateModel(
name='Studiu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.FileField(upload_to='')),
],
options={
'verbose_name_plural': 'Studii',
},
),
migrations.CreateModel(
name='StudiuDendocronologic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.FileField(upload_to='')),
('an', models.IntegerField(blank=True, null=True)),
('autor', models.CharField(blank=True, max_length=150, null=True)),
('detalii', models.TextField(blank=True, null=True)),
],
options={
'verbose_name_plural': 'Studii dendocronologice',
},
),
migrations.CreateModel(
name='SursaDatare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Surse Datări',
},
),
migrations.CreateModel(
name='TehnicaPictura',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tehnici Pictura',
},
),
migrations.CreateModel(
name='TipBatereSindrila',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Batere Sindrila',
},
),
migrations.CreateModel(
name='TipBotSindrila',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Bot Sindrila',
},
),
migrations.CreateModel(
name='TipFundatie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Fundatie',
},
),
migrations.CreateModel(
name='TipIconostas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Iconostas',
},
),
migrations.CreateModel(
name='TipPrelucrareSindrila',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Prelucrare Sindrila',
},
),
migrations.CreateModel(
name='TipPrindereSindrila',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Prindere Sindrila',
},
),
migrations.CreateModel(
name='TipSarpanta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Sarpanta',
},
),
migrations.CreateModel(
name='TipStructuraCatei',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Structură Căței',
},
),
migrations.CreateModel(
name='TipStructuraCheotoare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Structură Cheotoare',
},
),
migrations.CreateModel(
name='TipTurn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Tipuri Turn',
},
),
migrations.CreateModel(
name='TopografieBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Topografii Biserică',
},
),
migrations.CreateModel(
name='UtilizareBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
],
options={
'verbose_name_plural': 'Utilizări Biserică',
},
),
migrations.CreateModel(
name='ZugravBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('istoric', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='biserici.istoric')),
('persoana', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.persoana')),
],
options={
'verbose_name_plural': 'Zugravi',
},
),
migrations.CreateModel(
name='StudiuIstoric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.FileField(upload_to='')),
('drepturi_de_autor', models.TextField()),
('istoric', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='biserici.istoric')),
],
options={
'verbose_name_plural': 'Studii Istorice',
},
),
migrations.CreateModel(
name='PersonalitateBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('istoric', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='biserici.istoric')),
('persoana', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.persoana')),
],
options={
'verbose_name_plural': 'Personalități Biserică',
},
),
migrations.CreateModel(
name='MutareBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('latitudine', models.FloatField(blank=True, null=True)),
('longitudine', models.FloatField(blank=True, null=True)),
('istoric', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='biserici.istoric')),
('localitate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nomenclatoare.localitate')),
],
options={
'verbose_name_plural': 'Mutări Biserică',
},
),
migrations.CreateModel(
name='MesterBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('istoric', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='biserici.istoric')),
('persoana', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.persoana')),
],
options={
'verbose_name_plural': 'Meșteri',
},
),
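# The Historical* models below are the shadow tables maintained by
# django-simple-history: each mirrors a model above and adds the
# history_id / history_date / history_change_reason / history_type /
# history_user bookkeeping fields.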
migrations.CreateModel(
name='HistoricalZugravBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
('persoana', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.persoana')),
],
options={
'verbose_name': 'historical zugrav biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalUtilizareBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical utilizare biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTopografieBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical topografie biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipStructuraCheotoare',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip structura cheotoare',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipStructuraCatei',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip structura catei',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipSarpanta',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip sarpanta',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipPrindereSindrila',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip prindere sindrila',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipPrelucrareSindrila',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip prelucrare sindrila',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipIconostas',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip iconostas',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipFundatie',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip fundatie',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipBotSindrila',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip bot sindrila',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTipBatereSindrila',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tip batere sindrila',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTehnicaPictura',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical tehnica pictura',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSursaDatare',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical sursa datare',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalStudiuIstoric',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.TextField(max_length=100)),
('drepturi_de_autor', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
],
options={
'verbose_name': 'historical studiu istoric',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalStudiuDendocronologic',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.TextField(max_length=100)),
('an', models.IntegerField(blank=True, null=True)),
('autor', models.CharField(blank=True, max_length=150, null=True)),
('detalii', models.TextField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical studiu dendocronologic',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalStudiu',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('fisier', models.TextField(max_length=100)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical studiu',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalStatutBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical statut biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSingularitateBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical singularitate biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSecol',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=6)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical secol',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalRelatieCimitir',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical relatie cimitir',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalProprietateBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical proprietate biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPlanTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical plan turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPlanimetrie',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical planimetrie',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPersonalitateBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
('persoana', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.persoana')),
],
options={
'verbose_name': 'historical personalitate biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPersoana',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical persoana',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPeisagisticaSit',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical peisagistica sit',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMutareBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('latitudine', models.FloatField(blank=True, null=True)),
('longitudine', models.FloatField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
('localitate', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.localitate')),
],
options={
'verbose_name': 'historical mutare biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMesterBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
('persoana', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.persoana')),
],
options={
'verbose_name': 'historical mester biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMaterialFinisajPeretiInteriori',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical material finisaj pereti interiori',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMaterialFinisajPardoseli',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical material finisaj pardoseli',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMaterial',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical material',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalLocalizarePictura',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical localizare pictura',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalLocalitate',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=50)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('judet', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.judet')),
],
options={
'verbose_name': 'historical localitate',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalJudet',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=50)),
('cod', models.CharField(max_length=2)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical judet',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalGalerieTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical galerie turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalFunctiuneBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical functiune biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalFinisajIconostas',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical finisaj iconostas',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalFinisajExterior',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical finisaj exterior',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalFinisaj',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical finisaj',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalEvenimentBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('eveniment', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.eveniment')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
],
options={
'verbose_name': 'historical eveniment biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalEveniment',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical eveniment',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalEsentaLemnoasa',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical esenta lemnoasa',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalElementImportant',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical element important',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalElementeBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical elemente biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalElementAnsambluConstruit',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical element ansamblu construit',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalDimensiuneTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical dimensiune turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalDecorTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical decor turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCultBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cult biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCtitorBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('istoric', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='biserici.istoric')),
('persoana', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nomenclatoare.persoana')),
],
options={
'verbose_name': 'historical ctitor biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalAmplasareTurn',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical amplasare turn',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalAmplasamentBiserica',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('nume', models.CharField(max_length=150)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical amplasament biserica',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
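# The concrete (non-historical) models follow. Unlike the shadow tables,
# which use db_constraint=False with DO_NOTHING, these enforce real FK
# constraints with CASCADE / SET_NULL deletion rules.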
migrations.CreateModel(
name='EvenimentBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('eveniment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.eveniment')),
('istoric', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='biserici.istoric')),
],
options={
'verbose_name_plural': 'Evenimente Istorice',
},
),
migrations.CreateModel(
name='CtitorBiserica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detalii', models.TextField()),
('sursa', models.TextField()),
('istoric', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='biserici.istoric')),
('persoana', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nomenclatoare.persoana')),
],
options={
'verbose_name_plural': 'Ctitori',
},
),
]
| 54.480769
| 191
| 0.562819
| 7,804
| 84,990
| 5.908252
| 0.03857
| 0.050577
| 0.058168
| 0.077557
| 0.895224
| 0.892426
| 0.889932
| 0.888891
| 0.888458
| 0.888414
| 0
| 0.00861
| 0.285316
| 84,990
| 1,559
| 192
| 54.515715
| 0.750482
| 0.000541
| 0
| 0.783505
| 1
| 0
| 0.184691
| 0.031421
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005155
| 0
| 0.007732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12d98cf222ef60c43e04a7e39384d60d9e99e57c
| 4,208
|
py
|
Python
|
junction/proposals/migrations/0026_auto_20200323_2010.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 192
|
2015-01-12T06:21:24.000Z
|
2022-03-10T09:57:37.000Z
|
junction/proposals/migrations/0026_auto_20200323_2010.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 621
|
2015-01-01T09:19:17.000Z
|
2021-05-28T09:27:35.000Z
|
junction/proposals/migrations/0026_auto_20200323_2010.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 207
|
2015-01-05T16:39:06.000Z
|
2022-02-15T13:18:15.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-03-23 14:40
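# Every AlterField below points an audit ForeignKey (created_by,
# modified_by, marked_as_spam_by) at AUTH_USER_MODEL with blank/null
# allowed and on_delete=SET_NULL, so the rows survive user deletion.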
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0025_auto_20200321_0049"),
]
operations = [
migrations.AlterField(
model_name="proposalcomment",
name="marked_as_spam_by",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="marked_as_spam_by",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="proposalsection",
name="created_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="created_proposalsection_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Created By",
),
),
migrations.AlterField(
model_name="proposalsection",
name="modified_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="updated_proposalsection_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Modified By",
),
),
migrations.AlterField(
model_name="proposalsectionreviewer",
name="created_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="created_proposalsectionreviewer_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Created By",
),
),
migrations.AlterField(
model_name="proposalsectionreviewer",
name="modified_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="updated_proposalsectionreviewer_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Modified By",
),
),
migrations.AlterField(
model_name="proposalsectionreviewervotevalue",
name="created_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="created_proposalsectionreviewervotevalue_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Created By",
),
),
migrations.AlterField(
model_name="proposalsectionreviewervotevalue",
name="modified_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="updated_proposalsectionreviewervotevalue_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Modified By",
),
),
migrations.AlterField(
model_name="proposaltype",
name="created_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="created_proposaltype_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Created By",
),
),
migrations.AlterField(
model_name="proposaltype",
name="modified_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="updated_proposaltype_set",
to=settings.AUTH_USER_MODEL,
verbose_name="Modified By",
),
),
]
| 33.664
| 76
| 0.529468
| 357
| 4,208
| 5.960784
| 0.173669
| 0.06203
| 0.105733
| 0.12265
| 0.870771
| 0.845865
| 0.803102
| 0.740132
| 0.740132
| 0.663064
| 0
| 0.011992
| 0.385694
| 4,208
| 124
| 77
| 33.935484
| 0.811219
| 0.015447
| 0
| 0.820513
| 1
| 0
| 0.16256
| 0.094928
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
420733e741bee79a8e3061a5b9e2346c653d9ea7
| 274
|
py
|
Python
|
catalyst/tools/meters/__init__.py
|
and-kul/catalyst
|
51428d7756e62b9b8ee5379f38e9fd576eeb36e5
|
[
"Apache-2.0"
] | 1
|
2020-11-14T13:35:22.000Z
|
2020-11-14T13:35:22.000Z
|
catalyst/tools/meters/__init__.py
|
and-kul/catalyst
|
51428d7756e62b9b8ee5379f38e9fd576eeb36e5
|
[
"Apache-2.0"
] | 1
|
2021-01-07T16:13:45.000Z
|
2021-01-21T09:27:54.000Z
|
catalyst/tools/meters/__init__.py
|
and-kul/catalyst
|
51428d7756e62b9b8ee5379f38e9fd576eeb36e5
|
[
"Apache-2.0"
] | 1
|
2021-01-07T02:50:38.000Z
|
2021-01-07T02:50:38.000Z
|
# flake8: noqa
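# Package facade: re-export the meter classes so callers can write e.g.
# `from catalyst.tools.meters import AverageValueMeter` directly.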
from catalyst.tools.meters.meter import Meter
from catalyst.tools.meters.averagevaluemeter import AverageValueMeter
from catalyst.tools.meters.confusionmeter import ConfusionMeter
from catalyst.tools.meters.ppv_tpr_f1_meter import PrecisionRecallF1ScoreMeter
| 45.666667
| 78
| 0.879562
| 33
| 274
| 7.212121
| 0.424242
| 0.201681
| 0.285714
| 0.386555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.069343
| 274
| 5
| 79
| 54.8
| 0.921569
| 0.043796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4214a7302f8ef16fd123ca73c6a2889d845f9220
| 167
|
py
|
Python
|
Website/OperatorApp/tests/__init__.py
|
uofg-cs20/oti-operator
|
76aaeb021280dfb40bfc8cf04b2636c85b48ff48
|
[
"Apache-2.0"
] | null | null | null |
Website/OperatorApp/tests/__init__.py
|
uofg-cs20/oti-operator
|
76aaeb021280dfb40bfc8cf04b2636c85b48ff48
|
[
"Apache-2.0"
] | null | null | null |
Website/OperatorApp/tests/__init__.py
|
uofg-cs20/oti-operator
|
76aaeb021280dfb40bfc8cf04b2636c85b48ff48
|
[
"Apache-2.0"
] | null | null | null |
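# Star-import each test module at package level -- a common pattern for
# keeping the test classes discoverable when tests live in a package.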
from .test_operator_create import *
from .test_operator_edit import *
from .test_operator_api import *
from .test_hypercat import *
from .test_operator_login import *
| 27.833333
| 35
| 0.820359
| 24
| 167
| 5.333333
| 0.375
| 0.3125
| 0.5
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11976
| 167
| 5
| 36
| 33.4
| 0.870748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
422cbba634867ce2c0faa0516131c2a5d2da2fc2
| 8,714
|
py
|
Python
|
authorizenet/migrations/0001_initial.py
|
ajmirsky/django-authorizenet
|
0342e79eabffc49a85a40b0937b5e89663c334ca
|
[
"MIT"
] | 18
|
2015-01-03T22:54:21.000Z
|
2021-11-15T11:54:32.000Z
|
authorizenet/migrations/0001_initial.py
|
ajmirsky/django-authorizenet
|
0342e79eabffc49a85a40b0937b5e89663c334ca
|
[
"MIT"
] | 1
|
2016-08-31T21:00:40.000Z
|
2016-08-31T21:00:40.000Z
|
authorizenet/migrations/0001_initial.py
|
ajmirsky/django-authorizenet
|
0342e79eabffc49a85a40b0937b5e89663c334ca
|
[
"MIT"
] | 19
|
2015-01-02T07:34:26.000Z
|
2021-09-09T03:24:37.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
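# South (pre-Django-1.7) schema migration: forwards() creates the
# authorizenet_response table column by column, backwards() drops it, and
# the `models` dict at the bottom is South's frozen ORM snapshot.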
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Response'
db.create_table('authorizenet_response', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('response_code', self.gf('django.db.models.fields.CharField')(max_length=2)),
('response_subcode', self.gf('django.db.models.fields.CharField')(max_length=10)),
('response_reason_code', self.gf('django.db.models.fields.CharField')(max_length=15)),
('response_reason_text', self.gf('django.db.models.fields.TextField')()),
('auth_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
('avs_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
('trans_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('invoice_num', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
('amount', self.gf('django.db.models.fields.CharField')(max_length=16)),
('method', self.gf('django.db.models.fields.CharField')(max_length=10)),
('type', self.gf('django.db.models.fields.CharField')(max_length=20, db_index=True)),
('cust_id', self.gf('django.db.models.fields.CharField')(max_length=20)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('company', self.gf('django.db.models.fields.CharField')(max_length=50)),
('address', self.gf('django.db.models.fields.CharField')(max_length=60)),
('city', self.gf('django.db.models.fields.CharField')(max_length=40)),
('state', self.gf('django.db.models.fields.CharField')(max_length=40)),
('zip', self.gf('django.db.models.fields.CharField')(max_length=20)),
('country', self.gf('django.db.models.fields.CharField')(max_length=60)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=25)),
('fax', self.gf('django.db.models.fields.CharField')(max_length=25)),
('email', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ship_to_first_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('ship_to_last_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('ship_to_company', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('ship_to_address', self.gf('django.db.models.fields.CharField')(max_length=60, blank=True)),
('ship_to_city', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
('ship_to_state', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
('ship_to_zip', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('ship_to_country', self.gf('django.db.models.fields.CharField')(max_length=60, blank=True)),
('tax', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('duty', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('freight', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('tax_exempt', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('po_num', self.gf('django.db.models.fields.CharField')(max_length=25, blank=True)),
('MD5_Hash', self.gf('django.db.models.fields.CharField')(max_length=255)),
('cvv2_resp_code', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
('cavv_response', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
('test_request', self.gf('django.db.models.fields.CharField')(default='FALSE', max_length=10, blank=True)),
))
db.send_create_signal('authorizenet', ['Response'])
def backwards(self, orm):
# Deleting model 'Response'
db.delete_table('authorizenet_response')
models = {
'authorizenet.response': {
'MD5_Hash': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'Meta': {'object_name': 'Response'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'amount': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'auth_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'avs_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'cavv_response': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'cust_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'cvv2_resp_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duty': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'freight': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_num': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'po_num': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'response_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'response_reason_code': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'response_reason_text': ('django.db.models.fields.TextField', [], {}),
'response_subcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'ship_to_address': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'ship_to_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'ship_to_company': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ship_to_country': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'ship_to_first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ship_to_last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ship_to_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'ship_to_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'tax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'tax_exempt': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'test_request': ('django.db.models.fields.CharField', [], {'default': "'FALSE'", 'max_length': '10', 'blank': 'True'}),
'trans_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['authorizenet']
| 77.115044
| 131
| 0.602134
| 1,057
| 8,714
| 4.803217
| 0.092715
| 0.133937
| 0.231633
| 0.330904
| 0.865866
| 0.856805
| 0.851684
| 0.848927
| 0.757731
| 0.677762
| 0
| 0.023343
| 0.174088
| 8,714
| 112
| 132
| 77.803571
| 0.68209
| 0.007459
| 0
| 0
| 0
| 0
| 0.506651
| 0.327935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019802
| false
| 0
| 0.039604
| 0
| 0.089109
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
422e9ab3e48c896fb72b96888094e3448589c44e
| 166
|
py
|
Python
|
computing_and_control/ipcv/__init__.py
|
aap5869/RIT
|
d8a408e59a94b0edde56a207592fd7b803172119
|
[
"MIT"
] | null | null | null |
computing_and_control/ipcv/__init__.py
|
aap5869/RIT
|
d8a408e59a94b0edde56a207592fd7b803172119
|
[
"MIT"
] | null | null | null |
computing_and_control/ipcv/__init__.py
|
aap5869/RIT
|
d8a408e59a94b0edde56a207592fd7b803172119
|
[
"MIT"
] | null | null | null |
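# Only the OpenCV-backed histogram is exported; the brute-force, where()
# and numpy variants are kept commented out for reference.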
#from .histogram_brute_force import histogram
#from .histogram_where import histogram
#from .histogram_numpy import histogram
from .histogram_opencv import histogram
| 33.2
| 45
| 0.861446
| 21
| 166
| 6.571429
| 0.380952
| 0.376812
| 0.413043
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 166
| 4
| 46
| 41.5
| 0.92
| 0.722892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
427cee61c14e07fd3b0d001e616f4ecd9baa32d6
| 56,048
|
py
|
Python
|
release/stubs/Grasshopper/Kernel/Graphs.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs/Grasshopper/Kernel/Graphs.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs/Grasshopper/Kernel/Graphs.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
# encoding: utf-8
# module Grasshopper.Kernel.Graphs calls itself Graphs
# from Grasshopper, Version=1.0.0.20, Culture=neutral, PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# functions
def GH_GraphProxyObject(n_owner): # real signature unknown; restored from __doc__
""" GH_GraphProxyObject(n_owner: IGH_Graph) """
pass
# classes
class GH_AbstractGraph(object, IGH_Graph, GH_ISerializable):
# no doc
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_AbstractGraph) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_AbstractGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_AbstractGraph) """
pass
def CurveToPointFArray(self, *args): #cannot find CLR method
""" CurveToPointFArray(Crv: Curve, dest: RectangleF) -> Array[PointF] """
pass
def Draw_PostRenderGraph(self, g, cnt):
""" Draw_PostRenderGraph(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrid(self, g, cnt):
""" Draw_PostRenderGrid(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrip(self, g, cnt, index):
""" Draw_PostRenderGrip(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer, index: int) """
pass
def Draw_PostRenderTags(self, g, cnt):
""" Draw_PostRenderTags(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrid(self, g, cnt):
""" Draw_PreRenderGrid(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self, g, cnt, index):
""" Draw_PreRenderGrip(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer, index: int) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderTags(self, g, cnt):
""" Draw_PreRenderTags(self: GH_AbstractGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Duplicate(self):
""" Duplicate(self: GH_AbstractGraph) -> IGH_Graph """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: GH_AbstractGraph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_AbstractGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def IntersectionEvaluate(self, *args): #cannot find CLR method
""" IntersectionEvaluate(C: Curve, offset: float) -> float """
pass
def OnGraphChanged(self, bIntermediate):
""" OnGraphChanged(self: GH_AbstractGraph, bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: GH_AbstractGraph) """
pass
def Read(self, reader):
""" Read(self: GH_AbstractGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_AbstractGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_AbstractGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_AbstractGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, nName: str, nDescription: str) """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
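# Properties are stubbed with placeholder lambdas; the Get/Set docstrings
# that follow each one record the actual CLR property types.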
Description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Description(self: GH_AbstractGraph) -> str
"""
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_AbstractGraph) -> Guid
"""
Grips = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Grips(self: GH_AbstractGraph) -> List[GH_GraphGrip]
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_AbstractGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_AbstractGraph) -> bool
"""
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Name(self: GH_AbstractGraph) -> str
"""
GH_Evaluator = None
GraphChanged = None
class GH_BezierGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_BezierGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_BezierGraph) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_BezierGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_BezierGraph) """
pass
def Curve(self, *args): #cannot find CLR method
""" Curve(self: GH_BezierGraph) -> Curve """
pass
def Draw_PreRenderGrip(self, g, cnt, index):
""" Draw_PreRenderGrip(self: GH_BezierGraph, g: Graphics, cnt: GH_GraphContainer, index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_BezierGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_BezierGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_BezierGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_BezierGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_BezierGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_BezierGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_BezierGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_BezierGraph) -> bool
"""
class GH_ConicGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_ConicGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_ConicGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_ConicGraph) """
pass
def Curve(self, *args): #cannot find CLR method
""" Curve(self: GH_ConicGraph) -> NurbsCurve """
pass
def DestroyCurve(self, *args): #cannot find CLR method
""" DestroyCurve(self: GH_ConicGraph) """
pass
def FitConic(self, *args): #cannot find CLR method
""" FitConic(self: GH_ConicGraph, S: Point3d) -> NurbsCurve """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_ConicGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def MakeConic(self, *args): #cannot find CLR method
""" MakeConic(self: GH_ConicGraph, w: float) -> NurbsCurve """
pass
def Read(self, reader):
""" Read(self: GH_ConicGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_ConicGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_ConicGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_ConicGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_ConicGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_ConicGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_ConicGraph) -> bool
"""
class GH_DoubleSineGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_DoubleSineGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_DoubleSineGraph) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_DoubleSineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_DoubleSineGraph) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: GH_DoubleSineGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self, g, cnt, index):
""" Draw_PreRenderGrip(self: GH_DoubleSineGraph, g: Graphics, cnt: GH_GraphContainer, index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_DoubleSineGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def GraphAccuracy(self, *args): #cannot find CLR method
""" GraphAccuracy(self: GH_DoubleSineGraph, reg: RectangleF) -> float """
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_DoubleSineGraph, reader: GH_IReader) -> bool """
pass
def RecFromPoints(self, *args): #cannot find CLR method
""" RecFromPoints(self: GH_DoubleSineGraph, a: PointF, b: PointF) -> Rectangle """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_DoubleSineGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_DoubleSineGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_DoubleSineGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_DoubleSineGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_DoubleSineGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_DoubleSineGraph) -> bool
"""
m_eq0 = None
m_eq1 = None
m_path0 = None
m_path1 = None
class GH_GaussianGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_GaussianGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_GaussianGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_GaussianGraph) """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_GaussianGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_GaussianGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_GaussianGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_GaussianGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_GaussianGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_GaussianGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_GaussianGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_GaussianGraph) -> bool
"""
class GH_GraphContainer(object, GH_ISerializable, IGH_ResponsiveObject):
"""
GH_GraphContainer(n_graph: IGH_Graph)
GH_GraphContainer(n_graph: IGH_Graph, n_x0: float, n_x1: float, n_y0: float, n_y1: float)
"""
def ClearCaches(self):
""" ClearCaches(self: GH_GraphContainer) """
pass
def Duplicate(self):
""" Duplicate(self: GH_GraphContainer) -> GH_GraphContainer """
pass
def FromX(self, t):
""" FromX(self: GH_GraphContainer, t: float) -> float """
pass
def FromY(self, t):
""" FromY(self: GH_GraphContainer, t: float) -> float """
pass
def Internal_Render_Graph(self, *args): #cannot find CLR method
""" Internal_Render_Graph(self: GH_GraphContainer, G: Graphics) """
pass
def Internal_Render_Grip(self, *args): #cannot find CLR method
""" Internal_Render_Grip(self: GH_GraphContainer, g: Graphics, x: int, y: int) """
pass
def Internal_Render_Grips(self, *args): #cannot find CLR method
""" Internal_Render_Grips(self: GH_GraphContainer, G: Graphics) """
pass
def Internal_Render_HorizontalConstraint(self, *args): #cannot find CLR method
""" Internal_Render_HorizontalConstraint(self: GH_GraphContainer, g: Graphics, y: int) """
pass
def Internal_Render_InvalidIcon(self, *args): #cannot find CLR method
""" Internal_Render_InvalidIcon(self: GH_GraphContainer, g: Graphics) """
pass
def Internal_Render_LockedIcon(self, *args): #cannot find CLR method
""" Internal_Render_LockedIcon(self: GH_GraphContainer, g: Graphics) """
pass
def Internal_Render_TagGDIObjects(self, *args): #cannot find CLR method
""" Internal_Render_TagGDIObjects(self: GH_GraphContainer, zoom: Single, bg_brush: SolidBrush, fg_brush: SolidBrush, fg_pen: Pen) -> (SolidBrush, SolidBrush, Pen) """
pass
def Internal_Render_TagX(self, *args): #cannot find CLR method
""" Internal_Render_TagX(self: GH_GraphContainer, g: Graphics, graphrec: RectangleF, r_a: float, r_b: float) """
pass
def Internal_Render_TagY(self, *args): #cannot find CLR method
""" Internal_Render_TagY(self: GH_GraphContainer, g: Graphics, graphrec: RectangleF, r_a: float, r_b: float) """
pass
def Internal_Render_TextTag(self, *args): #cannot find CLR method
""" Internal_Render_TextTag(self: GH_GraphContainer, g: Graphics, graphrec: RectangleF, lowerright: bool, tag: str) """
pass
def Internal_Render_VerticalConstraint(self, *args): #cannot find CLR method
""" Internal_Render_VerticalConstraint(self: GH_GraphContainer, g: Graphics, x: int) """
pass
def NearestGrip(self, *args): #cannot find CLR method
""" NearestGrip(self: GH_GraphContainer, pt: PointF, max_search: float) -> int """
pass
def OnGraphChanged(self, bIntermediate):
""" OnGraphChanged(self: GH_GraphContainer, bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: GH_GraphContainer) """
pass
def Read(self, reader):
""" Read(self: GH_GraphContainer, reader: GH_IReader) -> bool """
pass
def RemapPointsToGraphRegion(self, pts):
""" RemapPointsToGraphRegion(self: GH_GraphContainer, pts: Array[PointF]) """
pass
def Render(self, G, bIncludeDomainTags, samples):
""" Render(self: GH_GraphContainer, G: Graphics, bIncludeDomainTags: bool, samples: List[float]) """
pass
@staticmethod
def Render_GraphBackground(G, region, bActive):
""" Render_GraphBackground(G: Graphics, region: RectangleF, bActive: bool) """
pass
@staticmethod
def Render_GraphGrid(G, region):
""" Render_GraphGrid(G: Graphics, region: RectangleF) """
pass
@staticmethod
def Render_GraphPen():
""" Render_GraphPen() -> Pen """
pass
@staticmethod
def Render_GuidePen():
""" Render_GuidePen() -> Pen """
pass
@staticmethod
def Render_HorizontalConstraint(g, rec, t):
""" Render_HorizontalConstraint(g: Graphics, rec: RectangleF, t: float) """
pass
@staticmethod
def Render_VerticalConstraint(g, rec, t):
""" Render_VerticalConstraint(g: Graphics, rec: RectangleF, t: float) """
pass
def RespondToKeyDown(self, sender, e):
""" RespondToKeyDown(self: GH_GraphContainer, sender: GH_Canvas, e: KeyEventArgs) -> GH_ObjectResponse """
pass
def RespondToKeyUp(self, sender, e):
""" RespondToKeyUp(self: GH_GraphContainer, sender: GH_Canvas, e: KeyEventArgs) -> GH_ObjectResponse """
pass
def RespondToMouseDoubleClick(self, sender, e):
""" RespondToMouseDoubleClick(self: GH_GraphContainer, sender: GH_Canvas, e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseDown(self, sender, e):
""" RespondToMouseDown(self: GH_GraphContainer, sender: GH_Canvas, e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseMove(self, sender, e):
""" RespondToMouseMove(self: GH_GraphContainer, sender: GH_Canvas, e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def RespondToMouseUp(self, sender, e):
""" RespondToMouseUp(self: GH_GraphContainer, sender: GH_Canvas, e: GH_CanvasMouseEvent) -> GH_ObjectResponse """
pass
def SolveGraphPath(self, *args): #cannot find CLR method
""" SolveGraphPath(self: GH_GraphContainer) -> GraphicsPath """
pass
def ToRegionBox(self, pt):
""" ToRegionBox(self: GH_GraphContainer, pt: PointF) -> PointF """
pass
def ToRegionBox_x(self, x):
""" ToRegionBox_x(self: GH_GraphContainer, x: float) -> Single """
pass
def ToRegionBox_y(self, y):
""" ToRegionBox_y(self: GH_GraphContainer, y: float) -> Single """
pass
def ToUnitBox(self, pt):
""" ToUnitBox(self: GH_GraphContainer, pt: PointF) -> PointF """
pass
def ToX(self, t_unit):
""" ToX(self: GH_GraphContainer, t_unit: float) -> float """
pass
def ToY(self, t_unit):
""" ToY(self: GH_GraphContainer, t_unit: float) -> float """
pass
def TryValueAt(self, t):
""" TryValueAt(self: GH_GraphContainer, t: float) -> float """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_GraphContainer, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_GraphContainer, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, n_graph, n_x0=None, n_x1=None, n_y0=None, n_y1=None):
"""
__new__(cls: type, n_graph: IGH_Graph)
__new__(cls: type, n_graph: IGH_Graph, n_x0: float, n_x1: float, n_y0: float, n_y1: float)
"""
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
Graph = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Graph(self: GH_GraphContainer) -> IGH_Graph
Set: Graph(self: GH_GraphContainer) = value
"""
LockGrips = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: LockGrips(self: GH_GraphContainer) -> bool
Set: LockGrips(self: GH_GraphContainer) = value
"""
Region = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Region(self: GH_GraphContainer) -> RectangleF
Set: Region(self: GH_GraphContainer) = value
"""
X0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: X0(self: GH_GraphContainer) -> float
Set: X0(self: GH_GraphContainer) = value
"""
X1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: X1(self: GH_GraphContainer) -> float
Set: X1(self: GH_GraphContainer) = value
"""
Y0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Y0(self: GH_GraphContainer) -> float
Set: Y0(self: GH_GraphContainer) = value
"""
Y1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Y1(self: GH_GraphContainer) -> float
Set: Y1(self: GH_GraphContainer) = value
"""
GraphChanged = None
GraphChangedEventHandler = None
m_graphpath = None
class GH_GraphDrawInstruction(Enum, IComparable, IFormattable, IConvertible):
""" enum GH_GraphDrawInstruction, values: none (0), skip (1) """
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
none = None
skip = None
value__ = None
class GH_GraphGrip(object):
"""
GH_GraphGrip()
GH_GraphGrip(nX: float, nY: float)
GH_GraphGrip(nX: float, nY: float, nConstraint: GH_GripConstraint)
GH_GraphGrip(nOther: GH_GraphGrip)
"""
def LimitToUnitDomain(self, bLimitX, bLimitY):
""" LimitToUnitDomain(self: GH_GraphGrip, bLimitX: bool, bLimitY: bool) """
pass
def OnGripChanged(self, bIntermediate):
""" OnGripChanged(self: GH_GraphGrip, bIntermediate: bool) """
pass
def SetIndex(self, nIndex):
""" SetIndex(self: GH_GraphGrip, nIndex: int) """
pass
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==y """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type)
__new__(cls: type, nX: float, nY: float)
__new__(cls: type, nX: float, nY: float, nConstraint: GH_GripConstraint)
__new__(cls: type, nOther: GH_GraphGrip)
"""
pass
def __ne__(self, *args): #cannot find CLR method
pass
Constraint = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Constraint(self: GH_GraphGrip) -> GH_GripConstraint
Set: Constraint(self: GH_GraphGrip) = value
"""
Index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Index(self: GH_GraphGrip) -> int
"""
Point = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Point(self: GH_GraphGrip) -> PointF
"""
X = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: X(self: GH_GraphGrip) -> float
Set: X(self: GH_GraphGrip) = value
"""
Y = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Y(self: GH_GraphGrip) -> float
Set: Y(self: GH_GraphGrip) = value
"""
GripChanged = None
GripChangedEventHandler = None
m_c = None
m_i = None
m_x = None
m_y = None
class GH_GripConstraint(Enum, IComparable, IFormattable, IConvertible):
""" enum GH_GripConstraint, values: horizontal (1), none (0), vertical (2) """
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
horizontal = None
none = None
value__ = None
vertical = None
class GH_LinearGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_LinearGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_LinearGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_LinearGraph) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: GH_LinearGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: GH_LinearGraph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_LinearGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_LinearGraph, reader: GH_IReader) -> bool """
pass
def SetFromParameters(self, nA, nB):
""" SetFromParameters(self: GH_LinearGraph, nA: float, nB: float) """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_LinearGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_LinearGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_LinearGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_LinearGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_LinearGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_LinearGraph) -> bool
"""
GH_LinearGraphProxy = None
class GH_ParabolaGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_ParabolaGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_ParabolaGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_ParabolaGraph) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: GH_ParabolaGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_ParabolaGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_ParabolaGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_ParabolaGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_ParabolaGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_ParabolaGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_ParabolaGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_ParabolaGraph) -> bool
"""
class GH_PerlinGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_PerlinGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_PerlinGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_PerlinGraph) """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_PerlinGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Interpolate(self, *args): #cannot find CLR method
""" Interpolate(self: GH_PerlinGraph, v0: float, v1: float, v2: float, v3: float, a: float) -> float """
pass
def Noise(self, *args): #cannot find CLR method
""" Noise(self: GH_PerlinGraph, i: int) -> float """
pass
def Read(self, reader):
""" Read(self: GH_PerlinGraph, reader: GH_IReader) -> bool """
pass
def Smooth(self, *args): #cannot find CLR method
""" Smooth(self: GH_PerlinGraph, x: float) -> float """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_PerlinGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_PerlinGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_PerlinGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_PerlinGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_PerlinGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_PerlinGraph) -> bool
"""
amplitude = None
decay = None
frequency = None
x_offset = None
y_offset = None
class GH_PowerGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_PowerGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_PowerGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_PowerGraph) """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_PowerGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_PowerGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_PowerGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_PowerGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_PowerGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_PowerGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_PowerGraph) -> bool
"""
class GH_SincGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_SincGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_SincGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_SincGraph) """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_SincGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_SincGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_SincGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_SincGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_SincGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_SincGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_SincGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_SincGraph) -> bool
"""
amplitude = None
frequency = None
X0 = None
X1 = None
x_shift = None
Y0 = None
Y1 = None
y_shift = None
class GH_SineEquation(object, GH_ISerializable):
""" GH_SineEquation() """
def Read(self, reader):
""" Read(self: GH_SineEquation, reader: GH_IReader) -> bool """
pass
def SetEquationFromGrips(self):
""" SetEquationFromGrips(self: GH_SineEquation) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_SineEquation, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_SineEquation, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
amplitude = None
frequency = None
offset = None
shift = None
X0 = None
X1 = None
Y0 = None
Y1 = None
class GH_SineGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_SineGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_SineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_SineGraph) """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_SineGraph, reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_SineGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_SineGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_SineGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_SineGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_SineGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_SineGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_SineGraph) -> bool
"""
m_eq = None
class GH_SquareRootGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_SquareRootGraph() """
def AddGrip(self, *args): #cannot find CLR method
""" AddGrip(self: GH_AbstractGraph, Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args): #cannot find CLR method
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args): #cannot find CLR method
""" CreateDerivedDuplicate(self: GH_SquareRootGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args): #cannot find CLR method
""" CreateGrips(self: GH_SquareRootGraph) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: GH_SquareRootGraph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def GHGraphToPointArray(self, *args): #cannot find CLR method
"""
GHGraphToPointArray(reg: RectangleF, pix_accuracy: float, eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph, reg: RectangleF, pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args): #cannot find CLR method
""" Internal_GripChanged(self: GH_AbstractGraph, grip: GH_GraphGrip, bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_SquareRootGraph, reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args): #cannot find CLR method
""" UpdateEquation(self: GH_SquareRootGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_SquareRootGraph, t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_SquareRootGraph, writer: GH_IWriter) -> bool """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: GH_SquareRootGraph) -> Guid
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: GH_SquareRootGraph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: GH_SquareRootGraph) -> bool
"""
class IGH_Graph(GH_ISerializable):
# no doc
def ClearCaches(self):
""" ClearCaches(self: IGH_Graph) """
pass
def Draw_PostRenderGraph(self, g, cnt):
""" Draw_PostRenderGraph(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrid(self, g, cnt):
""" Draw_PostRenderGrid(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrip(self, g, cnt, index):
""" Draw_PostRenderGrip(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer, index: int) """
pass
def Draw_PostRenderTags(self, g, cnt):
""" Draw_PostRenderTags(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) """
pass
def Draw_PreRenderGraph(self, g, cnt):
""" Draw_PreRenderGraph(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrid(self, g, cnt):
""" Draw_PreRenderGrid(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self, g, cnt, index):
""" Draw_PreRenderGrip(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer, index: int) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderTags(self, g, cnt):
""" Draw_PreRenderTags(self: IGH_Graph, g: Graphics, cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Duplicate(self):
""" Duplicate(self: IGH_Graph) -> IGH_Graph """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: IGH_Graph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: IGH_Graph, reg: RectangleF) -> Array[PointF] """
pass
def OnGraphChanged(self, bIntermediate):
""" OnGraphChanged(self: IGH_Graph, bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: IGH_Graph) """
pass
def ValueAt(self, t):
""" ValueAt(self: IGH_Graph, t: float) -> float """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Description(self: IGH_Graph) -> str
"""
GraphTypeID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GraphTypeID(self: IGH_Graph) -> Guid
"""
Grips = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Grips(self: IGH_Graph) -> List[GH_GraphGrip]
"""
Icon_16x16 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon_16x16(self: IGH_Graph) -> Image
"""
IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsValid(self: IGH_Graph) -> bool
"""
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Name(self: IGH_Graph) -> str
"""
GraphChanged = None
GraphChangedEventHandler = None
class IGH_GraphProxyObject:
# no doc
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
| 35.653944
| 221
| 0.623466
| 6,154
| 56,048
| 5.40949
| 0.047449
| 0.047762
| 0.063503
| 0.081646
| 0.820607
| 0.791499
| 0.768369
| 0.736618
| 0.690478
| 0.67663
| 0
| 0.00401
| 0.252551
| 56,048
| 1,571
| 222
| 35.676639
| 0.790633
| 0.444173
| 0
| 0.837278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.402367
| false
| 0.402367
| 0
| 0
| 0.58432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c44cc5affe888caa37e8eb6d25f96f2eee14d6d4
| 1,145
|
py
|
Python
|
widget/urls.py
|
xni06/wagtail-CMS
|
defe0f46e8109e96d6d5e9fd4cf002790fbcd54b
|
[
"MIT"
] | 4
|
2019-06-04T07:18:44.000Z
|
2020-06-15T22:27:36.000Z
|
widget/urls.py
|
xni06/wagtail-CMS
|
defe0f46e8109e96d6d5e9fd4cf002790fbcd54b
|
[
"MIT"
] | 38
|
2019-05-09T13:14:56.000Z
|
2022-03-12T00:54:57.000Z
|
widget/urls.py
|
xni06/wagtail-CMS
|
defe0f46e8109e96d6d5e9fd4cf002790fbcd54b
|
[
"MIT"
] | 3
|
2019-09-26T14:32:36.000Z
|
2021-05-06T15:48:01.000Z
|
from django.conf.urls import include, url
from widget.views import widget_iframe, widget_embed
urlpatterns = [
# Inline (?i) flags must lead the pattern (deprecated elsewhere since
# Python 3.6 and an error from 3.11), and the literal ".js" dot is escaped
# so it no longer matches any character.
url(r'(?P<uk_prn>[\w\-]+?)/(?P<kis_course_id>[\w\-\~]+?)/', include([
url(r'(?i)^small/$', widget_iframe, name='widget_iframe'),
url(r'(?i)^small/(?P<optional_1>[\w\-]+?)/$', widget_iframe, name='widget_iframe'),
url(r'(?i)^small/(?P<optional_1>[\w\-]+?)/(?P<optional_2>[\w\-]+?)/$', widget_iframe, name='widget_iframe'),
url(r'(?i)^(?P<optional_1>[\w\-]+?)/small/$', widget_iframe, name='widget_iframe'),
url(r'(?i)^(?P<optional_1>[\w\-]+?)/small/(?P<optional_2>[\w\-]+?)/$', widget_iframe, name='widget_iframe'),
url(r'(?i)^(?P<optional_1>[\w\-]+?)/small/(?P<optional_2>[\w\-]+?)/(?P<optional_3>[\w\-]+?)/$',
widget_iframe, name='widget_iframe'),
])),
url(r'(?i)^embed-script$', widget_embed, name='widget_embed'),
url(r'(?i)^embed-script/$', widget_embed, name='widget_embed'),
url(r'(?i)^embed-script\.js$', widget_embed, name='widget_embed'),
url(r'(?i)^embed-script\.js/$', widget_embed, name='widget_embed'),
]
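# Illustrative matches (hypothetical values): /<uk_prn>/<kis_course_id>/small/
# routes to widget_iframe with up to three optional path segments, while any
# /embed-script[.js][/] variant routes to widget_embed; all case-insensitive.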
| 57.25
| 120
| 0.570306
| 160
| 1,145
| 3.86875
| 0.18125
| 0.252019
| 0.12601
| 0.164782
| 0.783522
| 0.783522
| 0.783522
| 0.783522
| 0.783522
| 0.726979
| 0
| 0.009184
| 0.144105
| 1,145
| 19
| 121
| 60.263158
| 0.622449
| 0
| 0
| 0
| 0
| 0.058824
| 0.483843
| 0.331004
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4521332671487f04649aec8b2ec9e29849bbe9f
| 135,697
|
py
|
Python
|
aura/extensions/eve_rpg/eve_rpg.py
|
shibdib/Aura
|
2612052372157cf4a207007464815d2dbe638ba0
|
[
"MIT"
] | null | null | null |
aura/extensions/eve_rpg/eve_rpg.py
|
shibdib/Aura
|
2612052372157cf4a207007464815d2dbe638ba0
|
[
"MIT"
] | null | null | null |
aura/extensions/eve_rpg/eve_rpg.py
|
shibdib/Aura
|
2612052372157cf4a207007464815d2dbe638ba0
|
[
"MIT"
] | null | null | null |
import ast
import asyncio
import datetime
import itertools
import random
from aura.lib import db
from aura.lib import game_assets
from aura.lib import game_functions
from aura.utils import make_embed
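# Region stat rollover: with the ~12 second tick in EveRpg.tick_loop,
# 300 ticks ~= 1 hour and 7200 ticks ~= 1 day, so the branches below move
# the running hourly/daily kill counters into the "previous" columns.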
async def process_region_stats():
current_tick = await game_functions.get_tick()
if current_tick % 300 == 0:
sql = "SELECT * FROM region_info"
regions = await db.select(sql)
for region in regions:
current_hourly_npc = region[7]
current_hourly_player = region[9]
sql = ''' UPDATE region_info
SET npc_kills_previous_hour = (?),
player_kills_previous_hour = (?),
npc_kills_hour = 0,
player_kills_hour = 0
WHERE
region_id = (?); '''
values = (current_hourly_npc, current_hourly_player, region[1],)
await db.execute_sql(sql, values)
if current_tick % 7200 == 0:
sql = "SELECT * FROM region_info"
regions = await db.select(sql)
for region in regions:
current_day_npc = region[8]
current_day_player = region[10]
sql = ''' UPDATE region_info
SET npc_kills_previous_day = (?),
player_kills_previous_day = (?),
npc_kills_day = 0,
player_kills_day = 0
WHERE
region_id = (?); '''
values = (current_day_npc, current_day_player, region[1],)
await db.execute_sql(sql, values)
async def weighted_choice(items):
"""items is a list of tuples in the form (item, weight)"""
weight_total = sum((item[1] for item in items))
n = random.uniform(0, weight_total)
for item, weight in items:
if n < weight:
return item
n = n - weight
return item
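# Player ships are stored as a stringified dict (column 14 of
# eve_rpg_players); looted mods are appended to the ship's
# 'module_cargo_bay' list and the dict is written back as text.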
async def give_mod(player, mods):
player = await game_functions.refresh_player(player)
ship = ast.literal_eval(player[14])
for mod in mods:
if 'module_cargo_bay' in ship:
ship['module_cargo_bay'].append(mod)
else:
ship['module_cargo_bay'] = [mod]
new_ship = str(ship)
sql = ''' UPDATE eve_rpg_players
SET ship = (?)
WHERE
player_id = (?); '''
values = (new_ship, player[2],)
return await db.execute_sql(sql, values)
async def add_combat_timer(player):
player = await game_functions.refresh_player(player)
sql = ''' UPDATE eve_rpg_players
SET combat_timer = (?)
WHERE
player_id = (?); '''
values = (5, player[2],)
return await db.execute_sql(sql, values)
async def add_loss(player):
player = await game_functions.refresh_player(player)
sql = ''' UPDATE eve_rpg_players
SET losses = (?)
WHERE
player_id = (?); '''
values = (int(player[11]) + 1, player[2],)
return await db.execute_sql(sql, values)
async def add_kill(player, mods):
player = await game_functions.refresh_player(player)
sql = ''' UPDATE eve_rpg_players
SET kills=?,
ship=?
WHERE
player_id=?; '''
killer_ship = ast.literal_eval(player[14])
if 'kill_marks' not in killer_ship:
killer_ship['kill_marks'] = 1
else:
killer_ship['kill_marks'] += 1
if mods is not None:
for mod in mods:
if 'module_cargo_bay' in killer_ship:
killer_ship['module_cargo_bay'].append(mod)
else:
killer_ship['module_cargo_bay'] = [mod]
values = (int(player[10]) + 1, str(killer_ship), player[2],)
return await db.execute_sql(sql, values)
async def update_journal(player, isk, entry):
player = await game_functions.refresh_player(player)
utc = datetime.datetime.utcnow()
time = utc.strftime("%H:%M:%S")
if player[20] is not None:
journal = ast.literal_eval(player[20])
if len(journal) == 10:
journal.pop(0)
transaction = {'isk': isk, 'type': entry, 'time': time}
journal.append(transaction)
else:
transaction = {'isk': isk, 'type': entry, 'time': time}
journal = [transaction]
sql = ''' UPDATE eve_rpg_players
SET wallet_journal = (?)
WHERE
player_id = (?); '''
values = (str(journal), player[2],)
return await db.execute_sql(sql, values)
async def add_isk(player, isk):
player = await game_functions.refresh_player(player)
sql = ''' UPDATE eve_rpg_players
SET isk = (?)
WHERE
player_id = (?); '''
values = (int(player[5]) + isk, player[2],)
return await db.execute_sql(sql, values)
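# Level curve: levelling from level N requires reaching 100 * N XP; on
# level-up the XP counter resets to 0 rather than carrying the overflow.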
async def add_xp(player, xp_gained):
player = await game_functions.refresh_player(player)
if player[9] + xp_gained < 100 * player[8]:
sql = ''' UPDATE eve_rpg_players
SET xp = (?)
WHERE
player_id = (?); '''
values = (player[9] + xp_gained, player[2],)
else:
sql = ''' UPDATE eve_rpg_players
SET level = (?),
xp = (?)
WHERE
player_id = (?); '''
values = (player[8] + 1, 0, player[2],)
return await db.execute_sql(sql, values)
class EveRpg:
def __init__(self, bot):
self.bot = bot
self.ongoing_fleet_fights = {}
self.session = bot.session
self.config = bot.config
self.logger = bot.logger
self.loop = asyncio.get_event_loop()
self.loop.create_task(self.tick_loop())
self.active_sites = 0
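# Main game loop: one pass every ~12 seconds. Travel is awaited first so
# region state is current; the remaining subsystems run as fire-and-forget
# tasks, and any failure is logged and retried after a 5 second back-off.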
async def tick_loop(self):
await self.bot.wait_until_ready()
await self.initial_checks()
while not self.bot.is_closed():
try:
await self.process_travel()
asyncio.ensure_future(game_functions.tick_count())
asyncio.ensure_future(game_functions.combat_timer_management())
asyncio.ensure_future(self.process_special_regions())
asyncio.ensure_future(process_region_stats())
asyncio.ensure_future(self.process_belt_ratting())
asyncio.ensure_future(self.process_missions())
asyncio.ensure_future(self.process_belt_mining())
asyncio.ensure_future(self.process_anomaly_mining())
asyncio.ensure_future(self.process_anomaly_ratting())
asyncio.ensure_future(self.process_ongoing_fleet_fights())
asyncio.ensure_future(self.process_roams())
asyncio.ensure_future(self.process_ganks())
asyncio.ensure_future(self.process_users())
await asyncio.sleep(12)
except Exception:
self.logger.exception('ERROR:')
await asyncio.sleep(5)
async def initial_checks(self):
sql = "SELECT * FROM eve_rpg_players"
players = await db.select(sql)
for player in players:
user = self.bot.get_user(player[2])
if user is None:
await self.remove_bad_user(player[2])
continue
# Make sure regions are in the db
for key, region in game_assets.regions.items():
sql = "SELECT * FROM region_info WHERE `region_id` = (?)"
values = (key,)
region = await db.select_var(sql, values)
if len(region) == 0:
sec_status = await game_functions.get_region_security(key)
sql = ''' REPLACE INTO region_info(region_id,region_security)
VALUES(?,?) '''
values = (key, sec_status)
await db.execute_sql(sql, values)
sql = "SELECT * FROM region_market WHERE `region_id` = (?)"
values = (key,)
region = await db.select_var(sql, values)
if len(region) == 0:
sql = ''' REPLACE INTO region_market(region_id)
VALUES(?) '''
values = (key,)
await db.execute_sql(sql, values)
async def process_users(self):
current_tick = await game_functions.get_tick()
if current_tick % 100 == 0:
sql = "SELECT * FROM eve_rpg_players"
players = await db.select(sql)
for player in players:
user = self.bot.get_user(player[2])
if user is None:
await self.remove_bad_user(player[2])
continue
async def process_special_regions(self):
sql = "SELECT * FROM region_info WHERE `pirate_anomaly` > 0 AND `region_security` != 'High'"
active_pirate_anomalies = await db.select(sql)
current_tick = await game_functions.get_tick()
if len(active_pirate_anomalies) < 8:
sql = "SELECT * FROM region_info WHERE `pirate_anomaly` == 0 AND `region_security` != 'High'"
potential_pirate_anomalies = await db.select(sql)
random.shuffle(potential_pirate_anomalies)
trimmed_list = potential_pirate_anomalies[:8 - len(active_pirate_anomalies)]
for new_anomaly in trimmed_list:
sql = ''' UPDATE region_info
SET pirate_anomaly = 1
WHERE
region_id = (?); '''
values = (new_anomaly[1],)
await db.execute_sql(sql, values)
elif current_tick % 300 == 0:
reset_amount = random.randint(1, 8)
random.shuffle(active_pirate_anomalies)
trimmed_list = active_pirate_anomalies[:reset_amount]
for reset_anomaly in trimmed_list:
sql = ''' UPDATE region_info
SET pirate_anomaly = 0
WHERE
region_id = (?); '''
values = (reset_anomaly[1],)
await db.execute_sql(sql, values)
sql = "SELECT * FROM eve_rpg_players WHERE `region` == (?) AND (`task` == 7 OR `task` == 34)"
values = (reset_anomaly[1],)
anomaly_runners = await db.select_var(sql, values)
if len(anomaly_runners) > 0:
for runner in anomaly_runners:
sql = ''' UPDATE eve_rpg_players
SET task = 21
WHERE
id = (?); '''
values = (runner[0],)
await db.execute_sql(sql, values)
player = self.bot.get_user(runner[2])
await player.send('**Notice** The pirates have fled the system; the anomaly you were in '
'has been cleared and you are now floating in space.')
sql = "SELECT * FROM region_info WHERE `mining_anomaly` > 0 AND `region_security` != 'High'"
active_mining_anomalies = await db.select(sql)
if len(active_mining_anomalies) < 8:
sql = "SELECT * FROM region_info WHERE `mining_anomaly` == 0 AND `region_security` != 'High'"
potential_pirate_anomalies = await db.select(sql)
random.shuffle(potential_pirate_anomalies)
trimmed_list = potential_pirate_anomalies[:8 - len(active_mining_anomalies)]
for new_anomaly in trimmed_list:
sql = ''' UPDATE region_info
SET mining_anomaly = 1
WHERE
region_id = (?); '''
values = (new_anomaly[1],)
await db.execute_sql(sql, values)
elif current_tick % 300 == 0:
reset_amount = random.randint(1, 8)
random.shuffle(active_mining_anomalies)
trimmed_list = active_mining_anomalies[:reset_amount]
for reset_anomaly in trimmed_list:
sql = ''' UPDATE region_info
SET mining_anomaly = 0
WHERE
region_id = (?); '''
values = (reset_anomaly[1],)
await db.execute_sql(sql, values)
sql = "SELECT * FROM eve_rpg_players WHERE `region` == (?) AND (`task` == 11 OR `task` == 35)"
values = (reset_anomaly[1],)
anomaly_runners = await db.select_var(sql, values)
if len(anomaly_runners) > 0:
for runner in anomaly_runners:
sql = ''' UPDATE eve_rpg_players
SET task = 21
WHERE
id = (?); '''
values = (runner[0],)
await db.execute_sql(sql, values)
player = self.bot.get_user(runner[2])
await player.send(
'**Notice** The asteroids have been mined; the anomaly you were in is now '
'nothing more than a dust cloud.')
async def process_ongoing_fleet_fights(self):
if len(self.ongoing_fleet_fights) > 0:
for fight in self.ongoing_fleet_fights.values():
await self.fleet_versus_fleet(fight['attacker'], fight['defender'], fight['region'], fight['damaged'])
async def process_travel(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 20 '''
travelers = await db.select(sql)
if travelers is None or len(travelers) == 0:
return
for traveler in travelers:
region_id = int(traveler[4])
destination_id = int(traveler[17])
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 3 AND (`region` = (?) OR `region` = (?)) '''
values = (region_id, destination_id,)
campers = await db.select_var(sql, values)
traveler_ship = ast.literal_eval(traveler[14])
defender_ship_id = traveler_ship['ship_type']
defender_attack, defender_defense, defender_maneuver, defender_tracking = \
await game_functions.get_combat_attributes(traveler, defender_ship_id)
if len(campers) != 0:
for camper in campers:
# Corp check
if camper[23] is not None:
corp_info = await game_functions.get_user_corp(camper[23])
corp_array = ast.literal_eval(corp_info[7])
if traveler[0] in corp_array:
continue
# Blue check
if camper[21] is not None:
blue_array = ast.literal_eval(camper[21])
if traveler[0] in blue_array:
continue
# Fleet check
if camper[16] is not None and camper[16] != 0:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (camper[16],)
fleet_info = await db.select_var(sql, values)
fleet_array = ast.literal_eval(fleet_info[0][3])
if traveler[0] in fleet_array:
continue
conflict = await weighted_choice([(True, 55 - defender_maneuver), (False, 55)])
if conflict is True:
camper_fleet = False
traveler_fleet = False
if camper[16] is not None and camper[16] != 0:
camper_fleet = True
if traveler[16] is not None and traveler[16] != 0:
traveler_fleet = True
if traveler_fleet is False and camper_fleet is False:
await self.solo_combat(camper, traveler)
elif traveler_fleet is False and camper_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (camper[16],)
fleet_info = await db.select_var(sql, values)
await self.fleet_versus_player(fleet_info[0], traveler, region_id)
elif camper_fleet is False and traveler_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (traveler[16],)
fleet_info = await db.select_var(sql, values)
await self.fleet_versus_player(fleet_info[0], camper, region_id)
elif camper_fleet is True and traveler_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (traveler[16],)
fleet_one_info = await db.select_var(sql, values)
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (camper[16],)
fleet_two_info = await db.select_var(sql, values)
await self.fleet_versus_fleet(fleet_one_info[0], fleet_two_info[0], region_id)
destination_name = await game_functions.get_region(destination_id)
sql = ''' UPDATE eve_rpg_players
SET region = (?),
task = 21
WHERE
player_id = (?); '''
values = (int(destination_id), traveler[2],)
await db.execute_sql(sql, values)
player = self.bot.get_user(traveler[2])
sql = ''' SELECT * FROM eve_rpg_players WHERE `region` = (?) '''
values = (destination_id,)
local_players = await db.select_var(sql, values)
region_info = await game_functions.get_region_info(destination_id)
anomaly_text = ''
if region_info[4] != 0:
anomaly_text = "*Pirate Anomalies Present In This Region*\n\n"
if region_info[5] != 0:
anomaly_text = "*Rich Mining Anomalies Present In This Region*\n\n"
if region_info[5] != 0 and region_info[4] != 0:
anomaly_text = "*Pirate Anomalies Present In This Region*\n*Rich Ore Anomalies Present In This Region*\n\n"
pve_kills_hour, pve_kills_day, pvp_kills_hour, pvp_kills_day, pve_kills_last_hour, pve_kills_yesterday, pvp_kills_last_hour, pvp_kills_yesterday = await game_functions.get_region_kill_info(
destination_id)
await player.send('**You have arrived in {}**\n\n{}'
'Local Count - {}\n'
'NPC Kills Last Hour/Prior Hour - {}/{}\n'
'Player Kills Last Hour/Prior Hour - {}/{}'.format(destination_name, anomaly_text,
len(local_players),
pve_kills_hour, pve_kills_last_hour,
pvp_kills_hour, pvp_kills_last_hour))
async def process_belt_ratting(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 6 '''
ratters = await db.select(sql)
if ratters is None or len(ratters) == 0:
return
for ratter in ratters:
region_id = int(ratter[4])
region_security = await game_functions.get_region_security(region_id)
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 6 AND `region` = (?) '''
values = (region_id,)
system_ratters = await db.select_var(sql, values)
npc = 45
isk_multi = 0.25
if region_security == 'Low':
npc = 55
isk_multi = 0.5
elif region_security == 'Null':
npc = 75
isk_multi = 0.65
# PVE Rolls
encounter = await weighted_choice(
[(True, npc / len(system_ratters)), (False, 100 - npc + 1)])
if encounter is True:
await self.process_pve_combat(ratter, isk_multi)
async def process_anomaly_ratting(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 7 '''
ratters = await db.select(sql)
if ratters is None or len(ratters) == 0:
return
for ratter in ratters:
region_id = int(ratter[4])
region_security = await game_functions.get_region_security(region_id)
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 7 AND `region` = (?) '''
values = (region_id,)
system_ratters = await db.select_var(sql, values)
npc = 35
if region_security == 'Low':
npc = 55
elif region_security == 'Null':
npc = 75
# PVE Rolls
encounter = await weighted_choice(
[(True, npc / len(system_ratters)), (False, 100 - npc + 1)])
if encounter is True:
await self.process_pve_combat(ratter)
async def process_belt_mining(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 10 '''
miners = await db.select(sql)
if miners is None or len(miners) == 0:
return
for miner in miners:
region_id = int(miner[4])
region_security = await game_functions.get_region_security(region_id)
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 10 AND `region` = (?) '''
values = (region_id,)
belt_miners = await db.select_var(sql, values)
isk = random.randint(100, 750)
possible_npc = False
ore = 50
if region_security == 'Low':
ore = 75
possible_npc = 2
isk = random.randint(2500, 8500)
elif region_security == 'Null':
ore = 90
possible_npc = 4
isk = random.randint(8000, 22450)
find_ore = await weighted_choice(
[(True, ore / len(belt_miners)), (False, 100 - (ore / len(belt_miners)))])
if find_ore is False:
continue
else:
if possible_npc is not False:
encounter = await weighted_choice([(True, possible_npc), (False, 100 - possible_npc)])
if encounter is True:
await self.process_pve_combat(miner)
# Ship multi
miner_ship = ast.literal_eval(miner[14])
ship_id = miner_ship['ship_type']
ship = await game_functions.get_ship(ship_id)
multiplier = 1
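# Mining yield multiplier by hull: class 21 hulls mine at 2.25x, and ship
# ids 80/81/90/91 get 4x/8x/6x/12x respectively (dedicated mining hulls --
# an assumption; the ids are not named anywhere in this module).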
if ship['class'] == 21:
multiplier = 2.25
if ship['id'] == 80:
multiplier = 4
if ship['id'] == 81:
multiplier = 8
if ship['id'] == 90:
multiplier = 6
if ship['id'] == 91:
multiplier = 12
if miner[12] is not None:
modules = ast.literal_eval(miner[12])
for module in modules:
if module == 17:
isk = (isk * .1) + isk
continue
if module == 18:
isk = (isk * .2) + isk
continue
if module == 121:
isk = (isk * .05) + isk
continue
if module == 122:
isk = (isk * .1) + isk
xp_gained = await weighted_choice([(1, 35), (2, 15), (0, 15)])
await add_xp(miner, xp_gained)
await add_isk(miner, isk * multiplier)
await update_journal(miner, isk * multiplier, 'Belt Mining')
async def process_anomaly_mining(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 11 '''
miners = await db.select(sql)
if miners is None or len(miners) == 0:
return
for miner in miners:
region_id = int(miner[4])
region_security = await game_functions.get_region_security(region_id)
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 11 AND `region` = (?) '''
values = (region_id,)
belt_miners = await db.select_var(sql, values)
isk = random.randint(100, 750)
possible_npc = False
ore = 50
if region_security == 'Low':
ore = 99
possible_npc = 4
isk = random.randint(12000, 19500)
elif region_security == 'Null':
ore = 99
possible_npc = 8
isk = random.randint(18000, 65000)
find_ore = await weighted_choice(
[(True, ore / len(belt_miners)), (False, 100 - (ore / len(belt_miners)))])
if find_ore is False:
continue
else:
if possible_npc is not False:
encounter = await weighted_choice([(True, possible_npc), (False, 100 - possible_npc)])
if encounter is True:
await self.process_pve_combat(miner)
# Ship multi
miner_ship = ast.literal_eval(miner[14])
ship_id = miner_ship['ship_type']
ship = await game_functions.get_ship(ship_id)
multiplier = 1
if ship['class'] == 21:
multiplier = 2.25
if ship['id'] == 80:
multiplier = 4
if ship['id'] == 81:
multiplier = 8
if ship['id'] == 90:
multiplier = 6
if ship['id'] == 91:
multiplier = 12
if miner[12] is not None:
modules = ast.literal_eval(miner[12])
for module in modules:
if module == 17:
isk = (isk * .1) + isk
continue
if module == 18:
isk = (isk * .2) + isk
continue
if module == 121:
isk = (isk * .05) + isk
continue
if module == 122:
isk = (isk * .1) + isk
xp_gained = await weighted_choice([(1, 35), (2, 15), (0, 15)])
await add_xp(miner, xp_gained)
await add_isk(miner, isk * multiplier)
await update_journal(miner, isk * multiplier, 'Anomaly Mining')
async def process_missions(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 9 '''
mission_runners = await db.select(sql)
if mission_runners is None or len(mission_runners) == 0:
return
for mission_runner in mission_runners:
if mission_runner[22] is None:
sql = ''' UPDATE eve_rpg_players
SET task = 10
WHERE
player_id = (?); '''
values = (mission_runner[2],)
await db.execute_sql(sql, values)
continue
mission_details = ast.literal_eval(mission_runner[22])
isk = mission_details['reward']
# PVE Rolls
complete_mission = await weighted_choice([(True, 15), (False, 30 * mission_details['level'])])
encounter = await weighted_choice([(True, 70), (False, 30)])
if encounter is True and complete_mission is False:
await self.process_pve_combat(mission_runner, 1, mission_details['level'])
else:
if complete_mission is False:
continue
xp_gained = await weighted_choice([(1 * mission_details['level'], 35),
(3 * mission_details['level'], 15),
(0, 15)])
await add_xp(mission_runner, xp_gained)
await add_isk(mission_runner, isk)
loot_chance = 4 * mission_details['level']
await self.pve_loot(mission_runner, loot_chance, True)
await update_journal(mission_runner, isk, 'Mission Reward')
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
embed.add_field(name="Mission Completed",
value="{}\n\n"
"Reward: {} ISK\n".format(mission_details['completion'],
'{0:,.2f}'.format(float(mission_details['reward']))))
mission_runner = await game_functions.refresh_player(mission_runner)
player = self.bot.get_user(mission_runner[2])
await player.send(embed=embed)
sql = ''' UPDATE eve_rpg_players
SET task = 21,
mission_details = (?)
WHERE
player_id = (?); '''
values = (None, mission_runner[2],)
await db.execute_sql(sql, values)
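# PVE combat resolution: a solo player fights alone; a fleeted player pools
# the attack of every in-region fleet member on the same task, and the NPC's
# ISK payout is split evenly across that payout array.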
async def process_pve_combat(self, player, isk_multi=1, mission=False):
region_id = int(player[4])
player_user = self.bot.get_user(player[2])
player_task = await game_functions.get_task(int(player[6]))
player_ship = ast.literal_eval(player[14])
ship_id = player_ship['ship_type']
player_ship_info = await game_functions.get_ship(ship_id)
player_attack, player_defense, player_maneuver, player_tracking = \
await game_functions.get_combat_attributes(player, ship_id)
payout_array = [player]
if player[16] is not None and player[16] != 0:
payout_array = []
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (player[16],)
fleet_info = await db.select_var(sql, values)
if len(fleet_info) > 0:
fleet_array = ast.literal_eval(fleet_info[0][3])
player_attack = 0
for member_id in fleet_array:
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (int(member_id),)
member = await db.select_var(sql, values)
if len(member) == 0:
await self.remove_bad_user_id(member_id)
await self.remove_bad_fleet(player[16])
continue
if member[0][4] != region_id:
continue
if member[0][6] != player[6] and int(player[6]) != 10:
continue
payout_array.append(member[0])
member_ship = ast.literal_eval(member[0][14])
member_attack, member_defense, member_maneuver, member_tracking = \
await game_functions.get_combat_attributes(member[0], member_ship['ship_type'])
player_attack += member_attack
ship = await game_functions.get_ship(ship_id)
region_security = await game_functions.get_region_security(region_id)
officer = False
if mission is not False:
npc = await game_functions.get_npc(mission + 9)
elif region_security == 'High':
npc = await game_functions.get_npc(0)
elif region_security == 'Low':
npc = await game_functions.get_npc(1)
else:
officer = await weighted_choice([(True, 1), (False, 750)])
if officer is False:
npc = await game_functions.get_npc(2)
else:
npc = await game_functions.get_npc(20)
escape_chance = player_defense / 2 + player_maneuver
if player_maneuver == 0:
escape_chance = 0
npc_attack, npc_defense, npc_maneuver, npc_tracking = \
await game_functions.get_combat_attributes(player, npc['id'], True)
region_name = await game_functions.get_region(int(region_id))
# Combat
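# If one side out-maneuvers the other's tracking, the transversal ratio
# lowers the minimum hit; each round then rolls a triangular distribution
# between min and max damage with its mode at (min + max) / 4.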
transversal = 1
if (player_maneuver * 0.75) > npc_tracking + 1:
transversal = (npc_tracking + 1) / (player_maneuver * 0.75)
minimum_npc_damage = (npc_attack * transversal)
maximum_npc_damage = npc_attack
npc_triangular_medium = (maximum_npc_damage + minimum_npc_damage) / 4
transversal = 1
if (npc_maneuver * 0.75) > player_tracking + 1:
transversal = (player_tracking + 1) / (npc_maneuver * 0.75)
minimum_player_damage = (player_attack * transversal)
maximum_player_damage = player_attack
player_triangular_medium = (minimum_player_damage + maximum_player_damage) / 4
player_hits, npc_hits = ship['hit_points'], npc['hit_points']
round_counter = 1
for x in range(125):
player_defense = await game_functions.manage_regen(player, player_defense)
npc_damage = round(random.triangular(minimum_npc_damage, maximum_npc_damage, npc_triangular_medium), 3)
player_damage = round(
random.triangular(minimum_player_damage, maximum_player_damage, player_triangular_medium), 3)
if round_counter % 2 == 0:
aggressor = False
else:
aggressor = player
round_counter += 1
if aggressor != player:
if player_defense > 0:
if player_defense >= npc_damage:
player_defense -= npc_damage
else:
npc_damage -= player_defense
player_defense = 0
player_hits -= npc_damage
else:
player_hits -= npc_damage
else:
if npc_defense > 0:
if npc_defense >= player_damage:
npc_defense -= player_damage
else:
player_damage -= npc_defense
npc_defense = 0
npc_hits -= player_damage
else:
npc_hits -= player_damage
if player_hits <= 0:
break
if npc_hits <= 0:
for player in payout_array:
await game_functions.track_npc_kills(region_id)
await add_xp(player, random.randint(2, 10))
await add_isk(player, int(float((npc['isk'] * isk_multi))) / len(payout_array))
await update_journal(player, int(float((npc['isk'] * isk_multi))) / len(payout_array),
'{} - {}'.format(player_task, npc['name']))
if officer is True:
await self.pve_loot(player, 1, False, True)
return
if player_hits < ship['hit_points']:
escape = await weighted_choice([(True, escape_chance), (False, 100 - escape_chance)])
if escape is True:
await player_user.send(
'**PVE ESCAPE** - Combat with a {} nearly destroyed your {}, but you '
'managed to warp off.'.format(npc['name'], player_ship_info['name']))
return
if npc_hits > 0 and player_hits > 0:
await player_user.send(
'**PVE DISENGAGE** - Combat between you and a {} has ended in a draw. You ended the battle '
'with {} of {} hit points, while they ended with {} of {} hit points.'.format(
npc['name'], player_hits, ship['hit_points'], npc_hits,
npc['hit_points']))
return
module_value = 0
loser_modules = ''
cargo_modules = ''
loser_modules_array = []
cargo_modules_array = []
loser_name = player_user.display_name
if player[23] is not None:
corp_info = await game_functions.get_user_corp(player[23])
loser_name = '{} [{}]'.format(loser_name, corp_info[4])
if player[12] is not None:
modules = ast.literal_eval(player[12])
for module in modules:
module_item = await game_functions.get_module(module)
module_value += module_item['isk']
loser_modules_array.append('{}'.format(module_item['name']))
loser_module_list = '\n'.join(loser_modules_array)
loser_modules = '\n\n__Equipped Modules Lost__\n{}'.format(loser_module_list)
if 'module_cargo_bay' in player_ship:
modules = player_ship['module_cargo_bay']
for module in modules:
module_item = await game_functions.get_module(module)
module_value += module_item['isk']
cargo_modules_array.append('{}'.format(module_item['name']))
cargo_module_list = '\n'.join(cargo_modules_array)
cargo_modules = '\n\n__Cargo Lost__\n{}'.format(cargo_module_list)
module_value += ship['isk']
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(ship_id)
embed.set_thumbnail(url="{}".format(ship_image))
embed.add_field(name="NPC Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed while they were {}.{}{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**{}**\n\n".format(region_name, loser_name, ship['name'], player_task,
loser_modules, cargo_modules, '{0:,.2f}'.format(float(module_value)),
npc['name']))
await add_loss(player)
await player_user.send(embed=embed)
if ship['class'] != 0:
await self.send_global(embed, True)
return await self.destroy_ship(player)
async def process_roams(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 31 OR `task` = 32 OR `task` = 33 OR `task` = 34 OR `task` = 35 '''
roamers = await db.select(sql)
if roamers is None or len(roamers) == 0:
return
for roamer in roamers:
region_id = int(roamer[4])
region_security = await game_functions.get_region_security(region_id)
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` != 1 AND `region` = (?) AND `player_id` != (?) '''
values = (region_id, roamer[2])
potential_targets = await db.select_var(sql, values)
if len(potential_targets) == 0 or region_security == 'High':
continue
else:
for target in potential_targets:
# Corp check
if roamer[23] is not None:
corp_info = await game_functions.get_user_corp(roamer[23])
corp_array = ast.literal_eval(corp_info[7])
if target[0] in corp_array:
continue
# Blue check
if roamer[21] is not None:
blue_array = ast.literal_eval(roamer[21])
if target[0] in blue_array:
continue
# Fleet check
if roamer[16] is not None and roamer[16] != 0:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (roamer[16],)
fleet_info = await db.select_var(sql, values)
fleet_array = ast.literal_eval(fleet_info[0][3])
if target[0] in fleet_array:
continue
target_aggression = 4
if roamer[6] == 31 and (target[6] == 6 or target[6] == 10):
target_aggression = 20
elif roamer[6] == 32 and target[6] == 20:
target_aggression = 20
elif roamer[6] == 33 and target[6] == 21:
target_aggression = 20
elif roamer[6] == 34 and target[6] == 7:
target_aggression = 20
elif roamer[6] == 35 and target[6] == 11:
target_aggression = 20
conflict = await weighted_choice([(True, target_aggression), (None, 100 - target_aggression)])
if conflict is None:
break
elif conflict is True:
roamer_fleet = False
target_fleet = False
if roamer[16] is not None and roamer[16] != 0:
roamer_fleet = True
if target[16] is not None and target[16] != 0:
target_fleet = True
if target_fleet is False and roamer_fleet is False:
await self.solo_combat(roamer, target)
elif target_fleet is False and roamer_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (roamer[16],)
fleet_info = await db.select_var(sql, values)
await self.fleet_versus_player(fleet_info[0], target, region_id)
elif roamer_fleet is False and target_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (target[16],)
fleet_info = await db.select_var(sql, values)
await self.fleet_versus_player(fleet_info[0], roamer, region_id)
elif roamer_fleet is True and target_fleet is True:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (target[16],)
fleet_one_info = await db.select_var(sql, values)
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (roamer[16],)
fleet_two_info = await db.select_var(sql, values)
await self.fleet_versus_fleet(fleet_one_info[0], fleet_two_info[0], region_id)
break
async def process_ganks(self):
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` = 4 '''
gankers = await db.select(sql)
if gankers is None or len(gankers) == 0:
return
for ganker in gankers:
region_id = int(ganker[4])
sql = ''' SELECT * FROM eve_rpg_players WHERE `task` != 1 AND `task` != 4 AND `region` = (?) AND `player_id` != (?) '''
values = (region_id, ganker[2])
potential_targets = await db.select_var(sql, values)
for target in potential_targets:
# Blue check
if ganker[21] is not None:
blue_array = ast.literal_eval(ganker[21])
if target[0] in blue_array:
return
# Fleet check
if ganker[16] is not None and ganker[16] != 0:
sql = ''' SELECT * FROM fleet_info WHERE `fleet_id` = (?) '''
values = (ganker[16],)
fleet_info = await db.select_var(sql, values)
fleet_array = ast.literal_eval(fleet_info[0][3])
if target[0] in fleet_array:
return
target_aggression = 5
if int(target[6]) == 9:
target_aggression = 2
conflict = await weighted_choice([(True, target_aggression), (False, 65), (None, 45)])
if conflict is None:
break
elif conflict is True:
region_id = int(ganker[4])
region_name = await game_functions.get_region(int(region_id))
ganker_user, target_user = self.bot.get_user(ganker[2]), self.bot.get_user(target[2])
target_task = await game_functions.get_task(target[6])
attacker_ship, defender_ship = ast.literal_eval(ganker[14]), ast.literal_eval(target[14])
attacker_ship_id, defender_ship_id = attacker_ship['ship_type'], defender_ship['ship_type']
attacker_attack, attacker_defense, attacker_maneuver, attacker_tracking = \
await game_functions.get_combat_attributes(ganker, attacker_ship_id)
defender_attack, defender_defense, defender_maneuver, defender_tracking = \
await game_functions.get_combat_attributes(target, defender_ship_id)
attacker_ship_info = await game_functions.get_ship(int(attacker_ship['ship_type']))
defender_ship_info = await game_functions.get_ship(int(defender_ship['ship_type']))
ganker_hits, target_hits = attacker_ship_info['hit_points'], defender_ship_info['hit_points']
turns = 0
success = False
concord_response = random.randint(4, 12)
concord = True
# Persist the defender's defense pool across rounds so damage accumulates
target_defense = defender_defense
for x in range(50):
turns += 1
if turns >= concord_response:
success = False
await target_user.send(
'**PVP** - {} attempted to gank you but Concord arrived in time to prevent it.'.format(
ganker_user.display_name))
break
# Figure out min/max damage
transversal = 1
if (defender_maneuver * 0.75) > attacker_tracking + 1:
transversal = (attacker_tracking + 1) / (defender_maneuver * 0.75)
minimum_attacker_damage = (attacker_attack * transversal)
maximum_attacker_damage = attacker_attack
attacker_triangular_medium = (minimum_attacker_damage + maximum_attacker_damage) / 4
damage = round(
random.triangular(minimum_attacker_damage, maximum_attacker_damage,
attacker_triangular_medium), 3)
# Apply damage to the defender's remaining defense pool first, then to
# the hull, so the fight state persists instead of resetting every round
if target_defense > 0:
if target_defense >= damage:
target_defense -= damage
else:
damage -= target_defense
target_defense = 0
target_hits -= damage
else:
target_hits -= damage
if ganker_hits <= 0:
success = False
concord = False
break
if target_hits <= 0:
success = True
break
if success is True:
target_modules = ''
target_modules_array = []
dropped_mods = []
module_value = 0
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
target_modules_array.append('{} {}'.format(module_item['name'], module_drop))
target_module_list = '\n'.join(target_modules_array)
target_modules = '\n\n__Modules Lost__\n{}'.format(target_module_list)
isk_lost = module_value + defender_ship_info['isk']
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(defender_ship['ship_type'])
embed.set_thumbnail(url="{}".format(ship_image))
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed while they were {}.{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"**Killer**\n"
"**{}** flying a {} while {}.\n\n".format(region_name,
target_user.display_name,
defender_ship_info['name'],
target_task, target_modules,
'{0:,.2f}'.format(
float(isk_lost)),
ganker_user.display_name,
attacker_ship_info['name'],
'Ganking'))
await ganker_user.send(embed=embed)
await target_user.send(embed=embed)
await self.send_global(embed, True)
await self.destroy_ship(target)
await add_loss(target)
await add_kill(ganker, dropped_mods)
await add_xp(ganker, 1)
await game_functions.track_player_kills(region_id)
target_modules = ''
target_modules_array = []
dropped_mods = []
module_value = 0
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
target_modules_array.append('{} {}'.format(module_item['name'], module_drop))
target_module_list = '\n'.join(target_modules_array)
target_modules = '\n\n__Modules Lost__\n{}'.format(target_module_list)
isk_lost = module_value + defender_ship_info['isk']
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(attacker_ship['ship_type'])
embed.set_thumbnail(url="{}".format(ship_image))
if concord is True:
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed while they were {}.{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**Concord**\n\n"
"**Other Attackers**\n"
"**{}** flying a {}.\n\n".format(region_name, ganker_user.display_name,
attacker_ship_info['name'],
'Ganking', target_modules,
'{0:,.2f}'.format(float(isk_lost)),
target_user.display_name,
defender_ship_info['name'], target_task))
else:
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed while they were {}.{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**{}** flying a {} while {}.\n\n".format(region_name,
ganker_user.display_name,
attacker_ship_info['name'],
'Ganking', target_modules,
'{0:,.2f}'.format(
float(isk_lost)),
target_user.display_name,
defender_ship_info['name'],
target_task))
await ganker_user.send(embed=embed)
await target_user.send(embed=embed)
await game_functions.track_player_kills(region_id)
await self.send_global(embed, True)
await self.destroy_ship(ganker)
await add_loss(ganker)
await add_kill(target, dropped_mods)
await add_xp(target, 1)
async def solo_combat(self, attacker, defender):
region = attacker[4]
region_name = await game_functions.get_region(int(region))
self.logger.info('Player vs. Player Battle in {}'.format(region_name))
# Give all participants a combat timer
attacker_fleet = [attacker]
defender_fleet = [defender]
await add_combat_timer(attacker)
await add_combat_timer(defender)
damaged_ships = {}
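# damaged_ships maps a player's row id -> {'defense': ..., 'hit_points': ...}
# so damage carries across rounds; untouched ships fall back to base stats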
combat_log = {}
for x in range(125):
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
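# interleave the two sides with zip_longest so turn order alternates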
merged_fleet = [y for y in
itertools.chain.from_iterable(itertools.zip_longest(attacker_fleet, defender_fleet)) if y]
on_field = attacker_fleet + defender_fleet
random.shuffle(on_field)
# manage regen/combat log creation
for player in merged_fleet:
if player[2] not in combat_log:
combat_log[player[2]] = []
if player[0] not in damaged_ships:
continue
defense = damaged_ships[player[0]]['defense']
hit_points = damaged_ships[player[0]]['hit_points']  # keep accumulated hull damage
new_defense = await game_functions.manage_regen(player, defense)
if new_defense != defense:
user = self.bot.get_user(player[2])
name = user.display_name
self.logger.info(
'{} regened {} defense. Resulting in {} defense remaining.'.format(name, new_defense - defense,
new_defense))
combat_log[player[2]].append(
'Your ship has regenerated {} defense points and now has {} defense remaining and {} hit points.'.format(
round(new_defense - defense, 1), round(new_defense, 2), hit_points))
damaged_ships[player[0]]['defense'] = new_defense
for attacker in merged_fleet:
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
attacker_ship = ast.literal_eval(attacker[14])
attacker_attack, attacker_defense, attacker_maneuver, attacker_tracking = \
await game_functions.get_combat_attributes(attacker, attacker_ship['ship_type'])
target = None
target_ship_details = None
for target in on_field:
# if in same fleet find new target
if target == attacker or ((target in attacker_fleet and attacker in attacker_fleet) or (
target in defender_fleet and attacker in defender_fleet)):
continue
else:
target_ship = ast.literal_eval(target[14])
target_ship_details = await game_functions.get_ship(target_ship['ship_type'])
target_attack, target_defense, target_maneuver, target_tracking = \
await game_functions.get_combat_attributes(target, attacker_ship['ship_type'])
break
if target_ship_details is None:
continue
# Figure out min/max damage
transversal = 1
if (target_maneuver * 0.75) > attacker_tracking + 1:
transversal = (attacker_tracking + 1) / (target_maneuver * 0.75)
minimum_attacker_damage = (attacker_attack * transversal)
maximum_attacker_damage = attacker_attack
attacker_triangular_medium = (minimum_attacker_damage + maximum_attacker_damage) / 4
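# mode at (min + max) / 4 skews the triangular roll toward low damage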
damage = round(
random.triangular(minimum_attacker_damage, maximum_attacker_damage, attacker_triangular_medium), 3)
original_damage = damage
# Determine if ship is already damaged
defense = target_defense
hit_points = target_ship_details['hit_points']
if target[0] in damaged_ships:
defense = damaged_ships[target[0]]['defense']
hit_points = damaged_ships[target[0]]['hit_points']
# if a defense pool exists, take damage out of it first
if defense > 0:
if defense >= damage:
defense -= damage
else:
damage -= defense
defense = 0
hit_points -= damage
else:
hit_points -= damage
# resolve display names for logging and the combat log
attacker_user, target_user = self.bot.get_user(attacker[2]), self.bot.get_user(target[2])
attacker_name, target_name = attacker_user.display_name, target_user.display_name
self.logger.info(
'{} attacked {} for {} damage. Resulting in {} defense remaining and {} hits.'.format(attacker_name,
target_name,
original_damage,
defense,
hit_points))
combat_log[attacker[2]].append(
'__*Attack*__ You attacked {} inflicting {} damage. Your target has {} defense remaining and {} hit points.'.format(
target_name, round(original_damage, 2), round(defense, 2), hit_points))
combat_log[target[2]].append(
'__*Defense*__ You were attacked by {} and received {} damage. You have {} defense remaining and {} hit points.'.format(
attacker_name, round(original_damage, 2), round(defense, 2), hit_points))
if hit_points > 0:
damaged_ships[target[0]] = {'hit_points': hit_points, 'defense': defense}
# if badly damaged, the target will attempt to flee
if defense < target_defense * 0.15:
flee = await weighted_choice([(True, target_maneuver), (False, attacker_tracking)])
if flee is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have fled the field. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has disengaged and warped away.'.format(target_name))
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
# Handle Cloak
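# module ids 40 and 41 are assumed to be the cloaking devices; a successful
# 50/50 roll lets the target warp off before taking the killing blow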
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
if module_item['id'] == 40 or module_item['id'] == 41:
escape = await weighted_choice([(True, 50), (False, 50)])
if escape is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have cloaked. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has cloaked and warped away.'.format(
target_name))
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
continue
else:
killing_blow = attacker
winner_user, loser_user = self.bot.get_user(killing_blow[2]), self.bot.get_user(target[2])
winner_name = winner_user.display_name
if killing_blow[23] is not None:
corp_info = await game_functions.get_user_corp(killing_blow[23])
winner_name = '{} [{}]'.format(winner_name, corp_info[4])
loser_name = self.bot.get_user(int(target[2])).display_name
if target[23] is not None:
corp_info = await game_functions.get_user_corp(target[23])
loser_name = '{} [{}]'.format(loser_name, corp_info[4])
winner_ship_obj = ast.literal_eval(killing_blow[14])
winner_ship = await game_functions.get_ship_name(int(winner_ship_obj['ship_type']))
loser_ship_obj = ast.literal_eval(target[14])
loser_ship = await game_functions.get_ship_name(int(loser_ship_obj['ship_type']))
loser_ship_info = await game_functions.get_ship(int(loser_ship_obj['ship_type']))
loser_modules = ''
cargo_modules = ''
loser_modules_array = []
cargo_modules_array = []
dropped_mods = []
module_value = 0
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
loser_modules_array.append('{} {}'.format(module_item['name'], module_drop))
loser_module_list = '\n'.join(loser_modules_array)
loser_modules = '\n\n__Modules Lost__\n{}'.format(loser_module_list)
if 'module_cargo_bay' in loser_ship_obj:
modules = loser_ship_obj['module_cargo_bay']
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
cargo_modules_array.append('{} {}'.format(module_item['name'], module_drop))
cargo_module_list = '\n'.join(cargo_modules_array)
cargo_modules = '\n\n__Cargo Lost__\n{}'.format(cargo_module_list)
xp_gained = await weighted_choice([(5, 45), (15, 25), (27, 15)])
isk_lost = module_value + loser_ship_info['isk']
if target not in attacker_fleet:
defender_fleet.remove(target)
else:
attacker_fleet.remove(target)
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(loser_ship_obj['ship_type'])
embed.set_thumbnail(url="{}".format(ship_image))
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed.{}{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**{}** flying a {}.".format(region_name, loser_name, loser_ship,
loser_modules, cargo_modules,
'{0:,.2f}'.format(float(isk_lost)),
winner_name, winner_ship))
await winner_user.send(embed=embed)
await loser_user.send(embed=embed)
await game_functions.track_player_kills(region)
await self.send_global(embed, True)
await self.destroy_ship(target)
await add_loss(target)
await add_kill(killing_blow, dropped_mods)
await add_xp(killing_blow, xp_gained)
await self.give_pvp_loot(killing_blow)
for key, log in combat_log.items():
user = self.bot.get_user(key)
entry_count = 0
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
log_array = []
field_count = 0
title = 'Combat Log'
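# pack up to four log entries per embed field; after the first field the
# title collapses to '-' so only the opening field reads 'Combat Log'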
for entry in log:
entry_count += 1
log_array.append(entry)
if entry_count >= 4:
if field_count >= 1:
title = '-'
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
log_array = []
entry_count = 0
field_count += 1
if field_count >= 6:
await user.send(embed=embed)
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
field_count = 0
if field_count >= 1:
title = '-'
if len(log_array) > 0:
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
await user.send(embed=embed)
async def fleet_versus_fleet(self, fleet_one, fleet_two, region, damaged=None):
region_name = await game_functions.get_region(int(region))
self.logger.info('Fleet vs. Fleet Battle in {}'.format(region_name))
# Fleet stuff
attacker_fleet_array = ast.literal_eval(fleet_one[3])
attacker_fleet = []
attacker_fleet_lost = []
attacker_isk_lost = 0
attacker_damage_dealt = 0
attackers_in_system = 0
for member_id in attacker_fleet_array:
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (int(member_id),)
member = await db.select_var(sql, values)
if member[0][4] != region:
continue
if member[0][6] == 1 or member[0][6] == 20:
continue
attacker_fleet.append(member[0])
attackers_in_system += 1
saved_attacker_fleet = list(attacker_fleet)  # copy; the live list is mutated during combat
attacker_count = len(attacker_fleet)
defender_fleet_array = ast.literal_eval(fleet_two[3])
defender_fleet = []
defender_fleet_lost = []
defender_isk_lost = 0
defender_damage_dealt = 0
defenders_in_system = 0
for member_id in defender_fleet_array:
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (int(member_id),)
member = await db.select_var(sql, values)
if member[0][4] != region:
continue
if member[0][6] == 1 or member[0][6] == 20:
continue
defender_fleet.append(member[0])
defenders_in_system += 1
saved_defender_fleet = list(defender_fleet)  # copy; the live list is mutated during combat
defender_count = len(defender_fleet)
if attackers_in_system == 0 or defenders_in_system == 0:
return
# Give all participants a combat timer
merged_fleet = attacker_fleet + defender_fleet
for fleet_member in merged_fleet:
await add_combat_timer(fleet_member)
damaged_ships = {}
combat_log = {}
if damaged is not None:
damaged_ships = damaged
flee_array = []
fight_round = 0
for x in range(125):
fight_round += 1
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
merged_fleet = [x for x in
itertools.chain.from_iterable(itertools.zip_longest(attacker_fleet, defender_fleet)) if x]
on_field = attacker_fleet + defender_fleet
random.shuffle(on_field)
# manage regen/combat log creation
for player in merged_fleet:
if player[2] not in combat_log:
combat_log[player[2]] = []
if player[0] not in damaged_ships:
continue
defense = damaged_ships[player[0]]['defense']
hit_points = damaged_ships[player[0]]['hit_points']  # keep accumulated hull damage
new_defense = await game_functions.manage_regen(player, defense)
if new_defense != defense:
user = self.bot.get_user(player[2])
name = user.display_name
self.logger.info(
'{} regened {} defense. Resulting in {} defense remaining.'.format(name, new_defense - defense,
new_defense))
combat_log[player[2]].append(
'Your ship has regenerated {} defense points and now has {} defense remaining and {} hit points.'.format(
round(new_defense - defense, 1), round(new_defense, 2), hit_points))
damaged_ships[player[0]]['defense'] = new_defense
for attacker in merged_fleet:
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
# 20% chance this attacker's shot does not occur this round
if random.random() > 0.8:
continue
attacker_ship = ast.literal_eval(attacker[14])
attacker_attack, attacker_defense, attacker_maneuver, attacker_tracking = \
await game_functions.get_combat_attributes(attacker, attacker_ship['ship_type'])
target = None
target_ship_details = None
for target in on_field:
# if in same fleet find new target
if target == attacker or ((target in attacker_fleet and attacker in attacker_fleet) or (
target in defender_fleet and attacker in defender_fleet)):
continue
else:
target_ship = ast.literal_eval(target[14])
target_ship_details = await game_functions.get_ship(target_ship['ship_type'])
target_attack, target_defense, target_maneuver, target_tracking = \
await game_functions.get_combat_attributes(target, attacker_ship['ship_type'])
break
if target_ship_details is None:
continue
# Figure out min/max damage
transversal = 1
if (target_maneuver * 0.75) > attacker_tracking + 1:
transversal = (attacker_tracking + 1) / (target_maneuver * 0.75)
minimum_attacker_damage = (attacker_attack * transversal)
maximum_attacker_damage = attacker_attack
attacker_triangular_medium = (minimum_attacker_damage + maximum_attacker_damage) / 4
damage = round(
random.triangular(minimum_attacker_damage, maximum_attacker_damage, attacker_triangular_medium), 3)
original_damage = damage
# log damage done for BR
if attacker in attacker_fleet:
attacker_damage_dealt += damage
else:
defender_damage_dealt += damage
# Determine if ship is already damaged
defense = target_defense
hit_points = target_ship_details['hit_points']
if target[0] in damaged_ships:
defense = damaged_ships[target[0]]['defense']
hit_points = damaged_ships[target[0]]['hit_points']
# if a defense pool exists, take damage out of it first
if defense > 0:
if defense >= damage:
defense -= damage
else:
damage -= defense
defense = 0
hit_points -= damage
else:
hit_points -= damage
# resolve display names for logging and the combat log
attacker_user, target_user = self.bot.get_user(attacker[2]), self.bot.get_user(target[2])
attacker_name, target_name = attacker_user.display_name, target_user.display_name
self.logger.info(
'{} attacked {} for {} damage. Resulting in {} defense remaining and {} hits.'.format(attacker_name,
target_name,
original_damage,
defense,
hit_points))
combat_log[attacker[2]].append(
'__*Attack*__ You attacked {} inflicting {} damage. Your target has {} defense remaining and {} hit points.'.format(
target_name, round(original_damage, 2), round(defense, 2), hit_points))
combat_log[target[2]].append(
'__*Defense*__ You were attacked by {} and received {} damage. You have {} defense remaining and {} hit points.'.format(
attacker_name, round(original_damage, 2), round(defense, 2), hit_points))
# if no damage done, continue
if damage <= 0:
continue
# if target survives, store damage
if hit_points > 0:
if target[0] in damaged_ships:
prior_attackers = damaged_ships[target[0]].get('attackers', [])
attackers = list(set(prior_attackers + [attacker[0]]))
damaged_ships[target[0]] = {'hit_points': hit_points, 'defense': defense,
'attackers': attackers}
else:
damaged_ships[target[0]] = {'hit_points': hit_points, 'defense': defense,
'attackers': [attacker[0]]}
# if badly damaged, the target will attempt to flee
if defense < target_defense * 0.15:
flee = await weighted_choice([(True, target_maneuver), (False, attacker_tracking)])
if flee is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have fled the field. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has disengaged and warped away.'.format(target_name))
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
# Handle Cloak
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
if module_item['id'] == 40 or module_item['id'] == 41:
escape = await weighted_choice([(True, 50), (False, 50)])
if escape is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have cloaked. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has cloaked and warped away.'.format(
target_name))
flee_array.append(target[0])
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
if target in merged_fleet:
merged_fleet.remove(target)
if target in on_field:
on_field.remove(target)
killing_blow = attacker
other_names = []
other_users = []
if target[0] in damaged_ships:
for on_mail in damaged_ships[target[0]].get('attackers', []):
if on_mail == killing_blow[0]:
continue
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (on_mail,)
player = await db.select_var(sql, values)
on_mail_name = self.bot.get_user(int(player[0][2])).display_name
if player[0][23] is not None:
corp_info = await game_functions.get_user_corp(player[0][23])
on_mail_name = '{} [{}]'.format(on_mail_name, corp_info[4])
other_users.append(player[0])
other_names.append('{}'.format(on_mail_name))
clean_names = '\n'.join(other_names)
if len(other_names) > 6:
clean_names = '\n{} fleet members.'.format(len(other_names))
winner_user, loser_user = self.bot.get_user(killing_blow[2]), self.bot.get_user(target[2])
winner_name = winner_user.display_name
if killing_blow[23] is not None:
corp_info = await game_functions.get_user_corp(killing_blow[23])
winner_name = '{} [{}]'.format(winner_name, corp_info[4])
loser_name = self.bot.get_user(int(target[2])).display_name
if target[23] is not None:
corp_info = await game_functions.get_user_corp(target[23])
loser_name = '{} [{}]'.format(loser_name, corp_info[4])
winner_ship_obj = ast.literal_eval(killing_blow[14])
winner_ship = await game_functions.get_ship_name(int(winner_ship_obj['ship_type']))
loser_ship_obj = ast.literal_eval(target[14])
loser_ship = await game_functions.get_ship_name(int(loser_ship_obj['ship_type']))
loser_ship_info = await game_functions.get_ship(int(loser_ship_obj['ship_type']))
loser_modules = ''
cargo_modules = ''
loser_modules_array = []
cargo_modules_array = []
dropped_mods = []
module_value = 0
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
loser_modules_array.append('{} {}'.format(module_item['name'], module_drop))
loser_module_list = '\n'.join(loser_modules_array)
loser_modules = '\n\n__Modules Lost__\n{}'.format(loser_module_list)
if 'module_cargo_bay' in loser_ship_obj:
modules = loser_ship_obj['module_cargo_bay']
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
cargo_modules_array.append('{} {}'.format(module_item['name'], module_drop))
cargo_module_list = '\n'.join(cargo_modules_array)
cargo_modules = '\n\n__Cargo Lost__\n{}'.format(cargo_module_list)
xp_gained = await weighted_choice([(5, 45), (15, 25), (27, 15)])
isk_lost = module_value + loser_ship_info['isk']
if target in attacker_fleet:
attacker_fleet.remove(target)
attacker_fleet_lost.append(target)
attacker_isk_lost += isk_lost
elif target in defender_fleet:
defender_fleet.remove(target)
defender_fleet_lost.append(target)
defender_isk_lost += isk_lost
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(loser_ship_obj['ship_type'])
embed.set_thumbnail(url="{}".format(ship_image))
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed.{}{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**{}** flying a {}.\n\n"
"__**Other Killers**__\n{}".format(region_name, loser_name, loser_ship,
loser_modules, cargo_modules,
'{0:,.2f}'.format(float(isk_lost)),
winner_name, winner_ship, clean_names))
await winner_user.send(embed=embed)
await loser_user.send(embed=embed)
await game_functions.track_player_kills(region)
await self.send_global(embed, True)
await self.destroy_ship(target)
await add_loss(target)
await add_kill(killing_blow, dropped_mods)
await add_xp(killing_blow, xp_gained)
await self.give_pvp_loot(killing_blow)
dropped_mods = []
for user in other_users:
await add_kill(user, dropped_mods)
await add_xp(user, xp_gained)
if len(merged_fleet) == 0:
break
ongoing_text = ''
self.logger.info(
'Fight lasted {} rounds. Fleet 1 lost {} ships and took {} damage. Fleet 2 lost {} ships and took {} damage'.format(
fight_round, len(attacker_fleet_lost), defender_damage_dealt, len(defender_fleet_lost),
attacker_damage_dealt))
if len(attacker_fleet) > 0 and len(defender_fleet) > 0:
ongoing_text = '\n\n**This Battle Is Still Ongoing**'
self.ongoing_fleet_fights[region] = {'attacker': attacker_fleet, 'defender': defender_fleet,
'region': region, 'damaged': damaged_ships}
if len(attacker_fleet_lost) > 0 or len(defender_fleet_lost) > 0:
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
embed.add_field(name="Fleet Battle Report",
value="Region: {}\n"
"Total Players Involved: {}\n"
"Ships Destroyed: {}\n"
"Total ISK Lost: {} ISK\n"
"Total Damage Done: {}\n{}".format(region_name, defender_count + attacker_count,
len(attacker_fleet_lost) + len(
defender_fleet_lost),
'{0:,.2f}'.format(
float(attacker_isk_lost + defender_isk_lost)),
attacker_damage_dealt + defender_damage_dealt,
ongoing_text),
inline=False)
embed.add_field(name="Fleet One Stats",
value="Fleet Size: {} Players\n"
"Total Losses: {}\n"
"ISK Lost: {} ISK\n"
"Total Damage Received: {}".format(attacker_count, len(attacker_fleet_lost),
'{0:,.2f}'.format(float(attacker_isk_lost)),
defender_damage_dealt),
inline=False)
fleet_one_members_array = []
counter = 0
for member in saved_attacker_fleet:
member_ship = ast.literal_eval(member[14])
ship_details = await game_functions.get_ship(member_ship['ship_type'])
name = self.bot.get_user(int(member[2])).display_name
if member in attacker_fleet_lost:
fleet_one_members_array.append('**Killed** {} - *{}*'.format(name, ship_details['name']))
elif member[0] in flee_array:
fleet_one_members_array.append('**Fled Battle** {} - *{}*'.format(name, ship_details['name']))
else:
fleet_one_members_array.append('{} - *{}*'.format(name, ship_details['name']))
counter += 1
if counter >= 10:
counter = 0
fleet_one_members_clean = '\n'.join(fleet_one_members_array)
embed.add_field(name="__Fleet One Members__",
value=fleet_one_members_clean)
fleet_one_members_array = []
if len(fleet_one_members_array) > 0:
fleet_one_members_clean = '\n'.join(fleet_one_members_array)
embed.add_field(name="__Fleet One Members__",
value=fleet_one_members_clean)
embed.add_field(name="Fleet Two Stats",
value="Fleet Size: {} Players\n"
"Total Losses: {}\n"
"ISK Lost: {} ISK\n"
"Total Damage Received: {}".format(defender_count, len(defender_fleet_lost),
'{0:,.2f}'.format(float(defender_isk_lost)),
attacker_damage_dealt),
inline=False)
fleet_two_members_array = []
counter = 0
for member in saved_defender_fleet:
member_ship = ast.literal_eval(member[14])
ship_details = await game_functions.get_ship(member_ship['ship_type'])
name = self.bot.get_user(int(member[2])).display_name
if member in defender_fleet_lost:
fleet_two_members_array.append('**Killed** {} - *{}*'.format(name, ship_details['name']))
elif member[0] in flee_array:
fleet_two_members_array.append('**Fled Battle** {} - *{}*'.format(name, ship_details['name']))
else:
fleet_two_members_array.append('{} - *{}*'.format(name, ship_details['name']))
counter += 1
if counter >= 10:
counter = 0
fleet_two_members_clean = '\n'.join(fleet_two_members_array)
embed.add_field(name="__Fleet Two Members__",
value=fleet_two_members_clean)
fleet_two_members_array = []
if len(fleet_two_members_array) > 0:
fleet_two_members_clean = '\n'.join(fleet_two_members_array)
embed.add_field(name="__Fleet Two Members__",
value=fleet_two_members_clean)
for fleet_member in merged_fleet:
user = self.bot.get_user(fleet_member[2])
await user.send(embed=embed)
await self.send_global(embed, True)
for key, log in combat_log.items():
user = self.bot.get_user(key)
entry_count = 0
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
log_array = []
field_count = 0
title = 'Combat Log'
for entry in log:
entry_count += 1
log_array.append(entry)
if entry_count >= 4:
if field_count >= 1:
title = '-'
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
log_array = []
entry_count = 0
field_count += 1
if field_count >= 6:
await user.send(embed=embed)
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
field_count = 0
if field_count >= 1:
title = '-'
if len(log_array) > 0:
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
await user.send(embed=embed)
async def fleet_versus_player(self, fleet_one, player, region):
region_name = await game_functions.get_region(int(region))
self.logger.info('Fleet vs. Player Battle in {}'.format(region_name))
# Fleet stuff
attacker_fleet_array = ast.literal_eval(fleet_one[3])
attacker_fleet = []
saved_attacker_fleet = []
attacker_fleet_lost = []
attacker_isk_lost = 0
attacker_damage_dealt = 0
attackers_in_system = 0
for member_id in attacker_fleet_array:
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (int(member_id),)
member = await db.select_var(sql, values)
if member[0][4] != region:
continue
if member[0][6] == 1 or member[0][6] == 20:
continue
saved_attacker_fleet.append(member[0])
attacker_fleet.append(member[0])
attackers_in_system += 1
if attackers_in_system == 0:
return
attacker_count = len(attacker_fleet)
defender_fleet = [player]
saved_defender_fleet = [player]
defender_count = len(defender_fleet)
defender_fleet_lost = []
defender_isk_lost = 0
defender_damage_dealt = 0
# Give all participants a combat timer
merged_fleet = attacker_fleet + defender_fleet
for fleet_member in merged_fleet:
await add_combat_timer(fleet_member)
damaged_ships = {}
combat_log = {}
flee_array = []
fight_round = 0
for x in range(125):
fight_round += 1
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
merged_fleet = [x for x in
itertools.chain.from_iterable(itertools.zip_longest(attacker_fleet, defender_fleet)) if x]
on_field = attacker_fleet + defender_fleet
random.shuffle(on_field)
# manage regen/combat log creation
for player in merged_fleet:
if player[2] not in combat_log:
combat_log[player[2]] = []
if player[0] not in damaged_ships:
continue
defense = damaged_ships[player[0]]['defense']
hit_points = damaged_ships[player[0]]['hit_points']  # keep accumulated hull damage
new_defense = await game_functions.manage_regen(player, defense)
if new_defense != defense:
user = self.bot.get_user(player[2])
name = user.display_name
self.logger.info(
'{} regened {} defense. Resulting in {} defense remaining.'.format(name, new_defense - defense,
new_defense))
combat_log[player[2]].append(
'Your ship has regenerated {} defense points and now has {} defense remaining and {} hit points.'.format(
round(new_defense - defense, 1), round(new_defense, 2), hit_points))
damaged_ships[player[0]]['defense'] = new_defense
for attacker in merged_fleet:
if len(attacker_fleet) == 0 or len(defender_fleet) == 0:
break
# 20% chance this attacker's shot does not occur this round
if random.random() > 0.8:
continue
attacker_ship = ast.literal_eval(attacker[14])
attacker_attack, attacker_defense, attacker_maneuver, attacker_tracking = \
await game_functions.get_combat_attributes(attacker, attacker_ship['ship_type'])
target = None
target_ship_details = None
for target in on_field:
# if in same fleet find new target
if target == attacker or ((target in attacker_fleet and attacker in attacker_fleet) or (
target in defender_fleet and attacker in defender_fleet)):
continue
else:
target_ship = ast.literal_eval(target[14])
target_ship_details = await game_functions.get_ship(target_ship['ship_type'])
target_attack, target_defense, target_maneuver, target_tracking = \
await game_functions.get_combat_attributes(target, attacker_ship['ship_type'])
break
if target_ship_details is None:
continue
# Figure out min/max damage
transversal = 1
if (target_maneuver * 0.75) > attacker_tracking + 1:
transversal = (attacker_tracking + 1) / (target_maneuver * 0.75)
minimum_attacker_damage = (attacker_attack * transversal)
maximum_attacker_damage = attacker_attack
attacker_triangular_medium = (minimum_attacker_damage + maximum_attacker_damage) / 4
damage = round(
random.triangular(minimum_attacker_damage, maximum_attacker_damage, attacker_triangular_medium), 3)
original_damage = damage
# log damage done for BR
if attacker in attacker_fleet:
attacker_damage_dealt += damage
else:
defender_damage_dealt += damage
# Determine if ship is already damaged
defense = target_defense
hit_points = target_ship_details['hit_points']
if target[0] in damaged_ships:
defense = damaged_ships[target[0]]['defense']
hit_points = damaged_ships[target[0]]['hit_points']
# if a defense pool exists, take damage out of it first
if defense > 0:
if defense >= damage:
defense -= damage
else:
damage -= defense
defense = 0
hit_points -= damage
else:
hit_points -= damage
# resolve display names for logging and the combat log
attacker_user, target_user = self.bot.get_user(attacker[2]), self.bot.get_user(target[2])
attacker_name, target_name = attacker_user.display_name, target_user.display_name
self.logger.info(
'{} attacked {} for {} damage. Resulting in {} defense remaining and {} hits.'.format(attacker_name,
target_name,
original_damage,
defense,
hit_points))
combat_log[attacker[2]].append(
'__*Attack*__ You attacked {} inflicting {} damage. Your target has {} defense remaining and {} hit points.'.format(
target_name, round(original_damage, 2), round(defense, 2), hit_points))
combat_log[target[2]].append(
'__*Defense*__ You were attacked by {} and received {} damage. You have {} defense remaining and {} hit points.'.format(
attacker_name, round(original_damage, 2), round(defense, 2), hit_points))
if damage <= 0:
continue
# if target survives, store damage
if hit_points > 0:
if target[0] in damaged_ships:
prior_attackers = damaged_ships[target[0]].get('attackers', [])
attackers = list(set(prior_attackers + [attacker[0]]))
damaged_ships[target[0]] = {'hit_points': hit_points, 'defense': defense,
'attackers': attackers}
else:
damaged_ships[target[0]] = {'hit_points': hit_points, 'defense': defense,
'attackers': [attacker[0]]}
# if badly damaged, the target will attempt to flee
if defense < target_defense * 0.15:
flee = await weighted_choice([(True, target_maneuver), (False, attacker_tracking)])
if flee is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have fled the field. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has disengaged and warped away.'.format(target_name))
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
# Handle Cloak
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
if module_item['id'] == 40 or module_item['id'] == 41:
escape = await weighted_choice([(True, 50), (False, 50)])
if escape is True:
self.logger.info(
'{} has successfully fled the field.'.format(target_name))
combat_log[target[2]].append(
'__*Flee*__ You have cloaked. You have {} defense remaining and {} hit points.'.format(
round(defense, 2), hit_points))
combat_log[attacker[2]].append(
'__*Enemy Fled*__ {} has cloaked and warped away.'.format(
target_name))
flee_array.append(target[0])
if target not in attacker_fleet:
defender_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
attacker_fleet.remove(target)
merged_fleet.remove(target)
on_field.remove(target)
continue
else:
if target in merged_fleet:
merged_fleet.remove(target)
if target in on_field:
on_field.remove(target)
killing_blow = attacker
other_names = []
other_users = []
if target[0] in damaged_ships:
for on_mail in damaged_ships[target[0]].get('attackers', []):
if on_mail == killing_blow[0]:
continue
sql = ''' SELECT * FROM eve_rpg_players WHERE `id` = (?) '''
values = (on_mail,)
player = await db.select_var(sql, values)
on_mail_name = self.bot.get_user(int(player[0][2])).display_name
if player[0][23] is not None:
corp_info = await game_functions.get_user_corp(player[0][23])
on_mail_name = '{} [{}]'.format(on_mail_name, corp_info[4])
other_users.append(player[0])
other_names.append('{}'.format(on_mail_name))
clean_names = '\n'.join(other_names)
if len(other_names) > 6:
clean_names = '\n{} fleet members.'.format(len(other_names))
winner_user, loser_user = self.bot.get_user(killing_blow[2]), self.bot.get_user(target[2])
winner_name = winner_user.display_name
if killing_blow[23] is not None:
corp_info = await game_functions.get_user_corp(killing_blow[23])
winner_name = '{} [{}]'.format(winner_name, corp_info[4])
loser_name = self.bot.get_user(int(target[2])).display_name
if target[23] is not None:
corp_info = await game_functions.get_user_corp(target[23])
loser_name = '{} [{}]'.format(loser_name, corp_info[4])
winner_ship_obj = ast.literal_eval(killing_blow[14])
winner_ship = await game_functions.get_ship_name(int(winner_ship_obj['ship_type']))
loser_ship_obj = ast.literal_eval(target[14])
loser_ship = await game_functions.get_ship_name(int(loser_ship_obj['ship_type']))
loser_ship_info = await game_functions.get_ship(int(loser_ship_obj['ship_type']))
loser_modules = ''
cargo_modules = ''
loser_modules_array = []
cargo_modules_array = []
dropped_mods = []
module_value = 0
if target[12] is not None:
modules = ast.literal_eval(target[12])
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
loser_modules_array.append('{} {}'.format(module_item['name'], module_drop))
loser_module_list = '\n'.join(loser_modules_array)
loser_modules = '\n\n__Modules Lost__\n{}'.format(loser_module_list)
if 'module_cargo_bay' in loser_ship_obj:
modules = loser_ship_obj['module_cargo_bay']
for module in modules:
module_item = await game_functions.get_module(module)
dropped = await weighted_choice([(True, 50), (False, 50)])
module_drop = ''
module_value += module_item['isk']
if dropped is True:
dropped_mods.append(module)
module_drop = ' **Module Dropped**'
cargo_modules_array.append('{} {}'.format(module_item['name'], module_drop))
cargo_module_list = '\n'.join(cargo_modules_array)
cargo_modules = '\n\n__Cargo Lost__\n{}'.format(cargo_module_list)
xp_gained = await weighted_choice([(5, 45), (15, 25), (27, 15)])
isk_lost = module_value + loser_ship_info['isk']
if target in attacker_fleet:
attacker_fleet.remove(target)
attacker_fleet_lost.append(target)
attacker_isk_lost += isk_lost
elif target in defender_fleet:
defender_fleet.remove(target)
defender_fleet_lost.append(target)
defender_isk_lost += isk_lost
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
ship_image = await game_functions.get_ship_image(loser_ship_obj['ship_type'])
embed.set_thumbnail(url="{}".format(ship_image))
embed.add_field(name="Killmail",
value="**Region** - {}\n\n"
"__**Loser**__\n"
"**{}** flying a {} was killed.{}{}\n\n"
"Total ISK Lost: {} ISK\n\n"
"__**Final Blow**__\n"
"**{}** flying a {}.\n\n"
"__**Other Killers**__\n{}".format(region_name, loser_name, loser_ship,
loser_modules, cargo_modules,
'{0:,.2f}'.format(float(isk_lost)),
winner_name, winner_ship, clean_names))
await winner_user.send(embed=embed)
await loser_user.send(embed=embed)
await game_functions.track_player_kills(region)
await self.send_global(embed, True)
await self.destroy_ship(target)
await add_loss(target)
await add_kill(killing_blow, dropped_mods)
await add_xp(killing_blow, xp_gained)
await self.give_pvp_loot(killing_blow)
dropped_mods = []
for user in other_users:
await add_kill(user, dropped_mods)
await add_xp(user, xp_gained)
if len(merged_fleet) == 0:
break
self.logger.info(
'Fight lasted {} rounds. Fleet lost {} ships and took {} damage. Player lost {} ships and took {} damage'.format(
fight_round, len(attacker_fleet_lost), defender_damage_dealt, len(defender_fleet_lost),
attacker_damage_dealt))
ongoing_text = ''
if len(attacker_fleet) > 0 and len(defender_fleet) > 0:
ongoing_text = '\n\n**This Battle Is Still Ongoing**'
self.ongoing_fleet_fights[region] = {'attacker': attacker_fleet, 'defender': defender_fleet,
'region': region, 'damaged': damaged_ships}
if len(attacker_fleet_lost) > 0 or len(defender_fleet_lost) > 0:
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
embed.add_field(name="Fleet Battle Report",
value="Region: {}\n"
"Total Players Involved: {}\n"
"Ships Destroyed: {}\n"
"Total ISK Lost: {} ISK\n"
"Total Damage Done: {}\n{}".format(region_name, defender_count + attacker_count,
len(attacker_fleet_lost) + len(
defender_fleet_lost),
'{0:,.2f}'.format(
float(attacker_isk_lost + defender_isk_lost)),
attacker_damage_dealt + defender_damage_dealt,
ongoing_text),
inline=False)
embed.add_field(name="Fleet One Stats",
value="Fleet Size: {} Players\n"
"Total Losses: {}\n"
"ISK Lost: {} ISK\n"
"Total Damage Received: {}".format(attacker_count, len(attacker_fleet_lost),
'{0:,.2f}'.format(float(attacker_isk_lost)),
defender_damage_dealt),
inline=False)
fleet_one_members_array = []
counter = 0
for member in saved_attacker_fleet:
member_ship = ast.literal_eval(member[14])
ship_details = await game_functions.get_ship(member_ship['ship_type'])
name = self.bot.get_user(int(member[2])).display_name
if member in attacker_fleet_lost:
fleet_one_members_array.append('**Killed** {} - *{}*'.format(name, ship_details['name']))
elif member[0] in flee_array:
fleet_one_members_array.append('**Fled Battle** {} - *{}*'.format(name, ship_details['name']))
else:
fleet_one_members_array.append('{} - *{}*'.format(name, ship_details['name']))
counter += 1
if counter >= 10:
counter = 0
fleet_one_members_clean = '\n'.join(fleet_one_members_array)
embed.add_field(name="__Fleet One Members__",
value=fleet_one_members_clean)
fleet_one_members_array = []
if len(fleet_one_members_array) > 0:
fleet_one_members_clean = '\n'.join(fleet_one_members_array)
embed.add_field(name="__Fleet One Members__",
value=fleet_one_members_clean)
embed.add_field(name="Fleet Two Stats",
value="Fleet Size: {} Players\n"
"Total Losses: {}\n"
"ISK Lost: {} ISK\n"
"Total Damage Received: {}".format(defender_count, len(defender_fleet_lost),
'{0:,.2f}'.format(float(defender_isk_lost)),
attacker_damage_dealt),
inline=False)
fleet_two_members_array = []
counter = 0
for member in saved_defender_fleet:
member_ship = ast.literal_eval(member[14])
ship_details = await game_functions.get_ship(member_ship['ship_type'])
name = self.bot.get_user(int(member[2])).display_name
if member in defender_fleet_lost:
fleet_two_members_array.append('**Killed** {} - *{}*'.format(name, ship_details['name']))
elif member[0] in flee_array:
fleet_two_members_array.append('**Fled Battle** {} - *{}*'.format(name, ship_details['name']))
else:
fleet_two_members_array.append('{} - *{}*'.format(name, ship_details['name']))
counter += 1
if counter >= 10:
counter = 0
fleet_two_members_clean = '\n'.join(fleet_two_members_array)
embed.add_field(name="__Fleet Two Members__",
value=fleet_two_members_clean)
fleet_two_members_array = []
if len(fleet_two_members_array) > 0:
fleet_two_members_clean = '\n'.join(fleet_two_members_array)
embed.add_field(name="__Fleet Two Members__",
value=fleet_two_members_clean)
for fleet_member in merged_fleet:
user = self.bot.get_user(fleet_member[2])
await user.send(embed=embed)
await self.send_global(embed, True)
for key, log in combat_log.items():
user = self.bot.get_user(key)
entry_count = 0
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
log_array = []
field_count = 0
title = 'Combat Log'
for entry in log:
entry_count += 1
log_array.append(entry)
if entry_count >= 4:
if field_count >= 1:
title = '-'
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
log_array = []
entry_count = 0
field_count += 1
if field_count >= 6:
await user.send(embed=embed)
embed = make_embed(icon=self.bot.user.avatar)
embed.set_footer(icon_url=self.bot.user.avatar_url,
text="Aura - EVE Text RPG")
field_count = 0
if field_count >= 1:
title = '-'
if len(log_array) > 0:
clean_log = '\n'.join(log_array)
embed.add_field(name=title,
value=clean_log)
await user.send(embed=embed)
async def send_global(self, message, embed=False):
sql = "SELECT * FROM eve_rpg_channels"
game_channels = await db.select(sql)
for channels in game_channels:
channel = self.bot.get_channel(int(channels[2]))
if channel is None:
self.logger.exception('eve_rpg - Bad Channel Attempted removing....')
await self.remove_bad_channel(channels[2])
continue
if embed is False:
await channel.send(message)
else:
await channel.send(embed=message)
async def remove_bad_user(self, player_id):
sql = ''' DELETE FROM eve_rpg_players WHERE `player_id` = (?) '''
values = (player_id,)
await db.execute_sql(sql, values)
return self.logger.info('eve_rpg - Bad player removed successfully')
async def remove_bad_user_id(self, player_id):
sql = ''' DELETE FROM eve_rpg_players WHERE `id` = (?) '''
values = (player_id,)
await db.execute_sql(sql, values)
return self.logger.info('eve_rpg - Bad player removed successfully')
async def remove_bad_channel(self, channel_id):
sql = ''' DELETE FROM eve_rpg_channels WHERE `channel_id` = (?) '''
values = (channel_id,)
await db.execute_sql(sql, values)
return self.logger.info('eve_rpg - Bad Channel removed successfully')
async def remove_bad_fleet(self, fleet_id):
sql = ''' DELETE FROM fleet_info WHERE `fleet_id` = (?) '''
values = (fleet_id,)
await db.execute_sql(sql, values)
return self.logger.info('eve_rpg - Bad fleet removed successfully')
async def destroy_ship(self, player):
player = await game_functions.refresh_player(player)
# map player[3] to a replacement rookie ship id (default 1)
ship_id = {1: 1, 2: 2, 3: 3, 4: 4, 99: 5}.get(player[3], 1)
lost_ship = ast.literal_eval(player[14])
new_id = await game_functions.create_unique_id()
ship = {'id': new_id, 'ship_type': ship_id}
sql = ''' UPDATE eve_rpg_players
SET ship = (?),
modules = NULL,
region = (?),
task = 1
WHERE
player_id = (?); '''
values = (str(ship), player[18], player[2],)
await db.execute_sql(sql, values)
if 'insured' in lost_ship:
channel = self.bot.get_user(player[2])
insurance_payout = '{0:,.2f}'.format(float(lost_ship['insurance_payout']))
if player[6] == 4:
return await channel.send(
'**Insurance DENIED** We regret to inform you that because you were performing'
' a criminal act at the time of your death we will be keeping your payout of '
'{} ISK.'.format(insurance_payout))
lost_ship_details = await game_functions.get_ship(lost_ship['ship_type'])
await channel.send('**Insurance Payout Received**\n\nThe loss of your {} was covered by insurance, {} ISK '
'has been deposited into your account.'.format(lost_ship_details['name'],
insurance_payout))
sql = ''' UPDATE eve_rpg_players
SET isk = (?)
WHERE
player_id = (?); '''
new_isk = float(player[5]) + float(lost_ship['insurance_payout'])
values = (int(float(new_isk)), player[2],)
await update_journal(player, lost_ship['insurance_payout'], 'Received Insurance')
return await db.execute_sql(sql, values)
async def give_pvp_loot(self, player):
player = await game_functions.refresh_player(player)
ship = ast.literal_eval(player[14])
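# three independent salvage rolls: tier 1 is near-guaranteed (90/100),
# tier 2 about a coin flip (55/100), tier 3 rare (5/100)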
tier_1_amount = random.randint(1, 50)
tier_1 = await weighted_choice([(True, 90), (False, 10)])
tier_1_text = ''
tier_2_amount = random.randint(1, 10)
tier_2 = await weighted_choice([(True, 55), (False, 45)])
tier_2_text = ''
tier_3_amount = random.randint(1, 3)
tier_3 = await weighted_choice([(True, 5), (False, 95)])
tier_3_text = ''
if 'component_cargo_bay' in ship:
loot = ship['component_cargo_bay']
else:
loot = []
if tier_1 is True:
loot_id = await game_functions.create_unique_id()
component = await game_functions.get_component(1)
tier_1_loot = {'type_id': 1, 'id': loot_id, 'amount': tier_1_amount}
loot.append(tier_1_loot)
tier_1_text = '{}x {}\n'.format(tier_1_amount, component['name'])
if tier_2 is True:
loot_id = await game_functions.create_unique_id()
loot_type = await weighted_choice([(2, 35), (3, 65)])
component = await game_functions.get_component(loot_type)
tier_2_loot = {'type_id': loot_type, 'id': loot_id, 'amount': tier_2_amount}
loot.append(tier_2_loot)
tier_2_text = '{}x {}\n'.format(tier_2_amount, component['name'])
if tier_3 is True:
loot_id = await game_functions.create_unique_id()
loot_type = await weighted_choice([(4, 65), (5, 35)])
component = await game_functions.get_component(loot_type)
tier_3_loot = {'type_id': loot_type, 'id': loot_id, 'amount': tier_3_amount}
loot.append(tier_3_loot)
tier_3_text = '{}x {}\n'.format(tier_3_amount, component['name'])
if tier_1 is True or tier_2 is True or tier_3 is True:
channel = self.bot.get_user(player[2])
await channel.send('**Ship Component Salvage Received**\n{}{}{}\n\n*Salvage stored in your ship\'s component'
' cargo bay. Dock and do !!me to see an option to empty it*'.format(tier_1_text,
tier_2_text,
tier_3_text))
ship['component_cargo_bay'] = loot
sql = ''' UPDATE eve_rpg_players
SET ship = (?)
WHERE
player_id = (?); '''
values = (str(ship), player[2],)
return await db.execute_sql(sql, values)
async def pve_loot(self, player, chance, overseer=False, officer=False):
# drop probability is `chance` out of 200
no_drop_weight = 200 - int(chance)
loot_drop = await weighted_choice([(True, chance), (False, no_drop_weight)])
if loot_drop is True or officer is True:
player = await game_functions.refresh_player(player)
ship = ast.literal_eval(player[14])
loot_type = await weighted_choice([(200, 25), (201, 25), (202, 25), (203, 25), (204, 25)])
item = await game_functions.get_module(loot_type)
if 'module_cargo_bay' in ship:
loot = ship['module_cargo_bay']
loot.append(loot_type)
else:
loot = [loot_type]
channel = self.bot.get_user(player[2])
await channel.send('**PVE Loot Received**\n\n**{}**\n\n*Get to a station and empty your module '
'bay to get it*'.format(item['name']))
ship['module_cargo_bay'] = loot
sql = ''' UPDATE eve_rpg_players
SET ship = (?)
WHERE
player_id = (?); '''
values = (str(ship), player[2],)
await db.execute_sql(sql, values)
if overseer is True:
player = await game_functions.refresh_player(player)
ship = ast.literal_eval(player[14])
loot_type = await weighted_choice([(205, 50), (206, 25), (207, 10), (208, 5)])
item = await game_functions.get_module(loot_type)
if 'module_cargo_bay' in ship:
loot = ship['module_cargo_bay']
loot.append(loot_type)
else:
loot = [loot_type]
channel = self.bot.get_user(player[2])
await channel.send('**PVE Loot Received**\n\n**{}**\n\n*Get to a station and empty your module '
'bay to get it*'.format(item['name']))
ship['module_cargo_bay'] = loot
sql = ''' UPDATE eve_rpg_players
SET ship = (?)
WHERE
player_id = (?); '''
values = (str(ship), player[2],)
await db.execute_sql(sql, values)
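The combat routines above all share one damage mechanic: the attacker's tracking against the target's maneuver sets a transversal multiplier on minimum damage, a low-skewed triangular roll picks the hit, and the defense pool soaks damage before hit points. The weighted_choice helper they await is not shown in this excerpt. A minimal standalone sketch of both, assuming only the standard library; the names are illustrative, not the bot's own helpers:

import random

def weighted_choice(pairs):
    # plausible stand-in for the bot's weighted_choice: pick one value
    # with probability proportional to its weight
    values, weights = zip(*pairs)
    return random.choices(values, weights=weights, k=1)[0]

def roll_damage(attack, tracking, target_maneuver):
    # a nimble target lowers the damage floor when maneuver (x0.75)
    # beats tracking + 1, as in the loops above
    transversal = 1.0
    if target_maneuver * 0.75 > tracking + 1:
        transversal = (tracking + 1) / (target_maneuver * 0.75)
    low, high = attack * transversal, attack
    mode = max(low, (low + high) / 4)  # clamped so the mode stays in range
    return round(random.triangular(low, high, mode), 3)

def apply_damage(damage, defense, hit_points):
    # defense (shields) soaks first; any remainder comes out of hit points
    if defense >= damage:
        return defense - damage, hit_points
    return 0.0, hit_points - (damage - defense)

# example: a 50-attack, 10-tracking ship firing on a 20-maneuver target
dmg = roll_damage(50, 10, 20)
print(apply_damage(dmg, defense=30, hit_points=100))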
2021/test_day2.py · snoyes/AoC @ d6ac6f1677b29aa2695645a0413cfbb1af9252d1 · MIT · 1 fork (2020-12-24) · 5,520 bytes · Python
from random import choice, randint, randrange, sample
from day2 import parser, part1, part2
exampleInput = """
forward 5
down 5
forward 8
up 3
down 8
forward 2
""".strip()
exampleData = list(map(parser, exampleInput.split("\n")))
def test_part1_example():
assert part1(exampleData) == 150
minVal, maxVal = -10000, 10000
maxNumDirections = 1000
commands = ('forward', 'down', 'up')
def test_part1_order_irrelevant():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(minVal, maxVal)) for _ in range(numDirections)]
assert part1(inputData) == part1(sample(inputData, k=len(inputData)))
def test_part1_negate_numbers():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(minVal, maxVal)) for _ in range(numDirections)]
invertedData = [(action, -val) for action, val in inputData]
assert part1(inputData) == part1(invertedData)
def test_part1_up_to_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(0, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'down', 'up': 'down'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part1(inputData) <= part1(invertedData)
def test_part1_down_to_up():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(0, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'up', 'up': 'up'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part1(inputData) >= part1(invertedData)
def test_part1_upside_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(minVal, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'up', 'up': 'down'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part1(inputData) == -part1(invertedData)
def test_part1_no_forward():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('down', 'up')), randrange(minVal, maxVal)) for _ in range(numDirections)]
assert part1(inputData) == 0
def test_part1_no_up_or_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [('forward', randrange(minVal, maxVal)) for _ in range(numDirections)]
assert part1(inputData) == 0
def test_part1_no_up():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('forward', 'down')), randrange(0, maxVal)) for _ in range(numDirections)]
assert part1(inputData) >= 0
def test_part1_no_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('forward', 'up')), randrange(0, maxVal)) for _ in range(numDirections)]
assert part1(inputData) <= 0
def test_part1_new_forward():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(1, maxVal)) for _ in range(numDirections)]
extraCommand = ('forward', randrange(1, maxVal))
i = randint(0, numDirections)
extraData = inputData[:i] + [extraCommand] + inputData[i:]
assert abs(part1(inputData)) <= abs(part1(extraData))
def test_part2_example():
assert part2(exampleData) == 900
def test_part2_negate_numbers():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(minVal, maxVal)) for _ in range(numDirections)]
invertedData = [(action, -val) for action, val in inputData]
assert part2(inputData) == -part2(invertedData)
def test_part2_up_to_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(0, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'down', 'up': 'down'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part2(inputData) <= part2(invertedData)
def test_part2_down_to_up():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(0, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'up', 'up': 'up'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part2(inputData) >= part2(invertedData)
def test_part2_upside_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(commands), randrange(minVal, maxVal)) for _ in range(numDirections)]
commandMap = {'forward': 'forward', 'down': 'up', 'up': 'down'}
invertedData = [(commandMap[action], val) for action, val in inputData]
assert part2(inputData) == -part2(invertedData)
def test_part2_no_forward():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('down', 'up')), randrange(minVal, maxVal)) for _ in range(numDirections)]
assert part2(inputData) == 0
def test_part2_no_up_or_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [('forward', randrange(minVal, maxVal)) for _ in range(numDirections)]
assert part2(inputData) == 0
def test_part2_no_up():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('forward', 'down')), randrange(0, maxVal)) for _ in range(numDirections)]
assert part2(inputData) >= 0
def test_part2_no_down():
numDirections = randrange(maxNumDirections + 1)
inputData = [(choice(('forward', 'up')), randrange(0, maxVal)) for _ in range(numDirections)]
assert part2(inputData) <= 0
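The tests above import parser, part1 and part2 from a day2 module that is not part of this dump. A minimal sketch consistent with the example assertions (part1 == 150, part2 == 900) under standard Advent of Code 2021 Day 2 semantics; an assumption, not the repository's actual day2.py:

def parser(line):
    # "forward 5" -> ('forward', 5)
    action, val = line.split()
    return action, int(val)

def part1(data):
    pos = sum(v for a, v in data if a == 'forward')
    depth = sum(v if a == 'down' else -v for a, v in data if a != 'forward')
    return pos * depth

def part2(data):
    pos = depth = aim = 0
    for action, val in data:
        if action == 'down':
            aim += val
        elif action == 'up':
            aim -= val
        else:  # forward
            pos += val
            depth += aim * val
    return pos * depth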
tests/test_libs_crypto_utils.py · fyntex/lib-cl-sii-python @ b6ffb72be1f173a1d2e44b17ae5c08caf96ebf34 · MIT · 8 stars · 141 issues · 3 forks · 45,005 bytes · Python
import unittest
from binascii import a2b_hex
from datetime import datetime
import cryptography.hazmat.primitives.hashes
import cryptography.x509
from cryptography.x509 import oid
from cl_sii.libs.crypto_utils import ( # noqa: F401
X509Cert, add_pem_cert_header_footer, load_der_x509_cert, load_pem_x509_cert,
remove_pem_cert_header_footer,
x509_cert_der_to_pem, x509_cert_pem_to_der,
)
from . import utils
# TODO: get fake certificates, keys, and all the variations from
# https://github.com/urllib3/urllib3/tree/1.24.2/dummyserver/certs
# TODO: move me into 'cl_sii/crypto/constants.py'
# - Agency ("Organismo"): MINISTERIO DE ECONOMÍA / SUBSECRETARIA DE ECONOMIA
# - Decree 181 (July-August 2002)
# "APRUEBA REGLAMENTO DE LA LEY 19.799 SOBRE DOCUMENTOS ELECTRONICOS, FIRMA ELECTRONICA
# Y LA CERTIFICACION DE DICHA FIRMA"
# (the regulation of Law 19.799 on electronic documents and electronic signatures)
# - ref: https://www.leychile.cl/Consulta/m/norma_plana?org=&idNorma=201668
# which states:
# > RUT of the certificate holder ("titular") : 1.3.6.1.4.1.8321.1
# > RUT of the issuing certification authority ("certificadora emisora") : 1.3.6.1.4.1.8321.2
_SII_CERT_CERTIFICADORA_EMISORA_RUT_OID = oid.ObjectIdentifier("1.3.6.1.4.1.8321.2")
_SII_CERT_TITULAR_RUT_OID = oid.ObjectIdentifier("1.3.6.1.4.1.8321.1")
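# Hedged helper sketch (illustrative only, not part of the library under
# test): how the OIDs above can be queried through the public 'cryptography'
# API instead of the private attributes the assertions below poke at. The
# returned values are the raw DER-encoded IA5String payloads found in the
# SubjectAlternativeName, e.g. b'\x16\n13185095-6'.
def _sketch_get_rut_other_name_values(x509_cert, rut_oid):
    san = x509_cert.extensions.get_extension_for_class(
        cryptography.x509.extensions.SubjectAlternativeName).value
    return [gn.value for gn in san
            if isinstance(gn, cryptography.x509.OtherName) and gn.type_id == rut_oid]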
class FunctionsTest(unittest.TestCase):
def test_add_pem_cert_header_footer(self) -> None:
# TODO: implement for function 'add_pem_cert_header_footer'.
pass
def test_remove_pem_cert_header_footer(self) -> None:
# TODO: implement for function 'remove_pem_cert_header_footer'.
pass
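# Hedged sketch (an assumption based purely on the function names; the real
# helpers imported above may differ) of what the two TODO-tested functions
# presumably do:
def _sketch_add_pem_cert_header_footer(pem_cert_body: str) -> str:
    # wrap a bare base64 body in the standard PEM certificate markers
    return ('-----BEGIN CERTIFICATE-----\n'
            + pem_cert_body.strip()
            + '\n-----END CERTIFICATE-----\n')

def _sketch_remove_pem_cert_header_footer(pem_cert: str) -> str:
    # drop the marker lines, keeping only the base64 body
    lines = [line for line in pem_cert.splitlines()
             if line.strip() not in ('-----BEGIN CERTIFICATE-----',
                                     '-----END CERTIFICATE-----')]
    return '\n'.join(lines).strip() + '\n'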
class LoadPemX509CertTest(unittest.TestCase):
def test_load_der_x509_cert_ok(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/crypto/wildcard-google-com-cert.der')
x509_cert = load_der_x509_cert(cert_der_bytes)
self.assertIsInstance(x509_cert, X509Cert)
#######################################################################
# main properties
#######################################################################
self.assertEqual(
x509_cert.version,
cryptography.x509.Version.v3)
self.assertIsInstance(
x509_cert.signature_hash_algorithm,
cryptography.hazmat.primitives.hashes.SHA256)
self.assertEqual(
x509_cert.signature_algorithm_oid,
oid.SignatureAlgorithmOID.RSA_WITH_SHA256)
self.assertEqual(
x509_cert.serial_number,
122617997729991213273569581938043448870)
self.assertEqual(
x509_cert.not_valid_after,
datetime(2019, 6, 18, 13, 24))
self.assertEqual(
x509_cert.not_valid_before,
datetime(2019, 3, 26, 13, 40, 40))
#######################################################################
# issuer
#######################################################################
self.assertEqual(len(x509_cert.issuer.rdns), 3)
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'US')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'Google Trust Services')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'Google Internet Authority G3')
#######################################################################
# subject
#######################################################################
self.assertEqual(len(x509_cert.subject.rdns), 5)
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'US')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.STATE_OR_PROVINCE_NAME)[0].value,
'California')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.LOCALITY_NAME)[0].value,
'Mountain View')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'Google LLC')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'*.google.com')
#######################################################################
# extensions
#######################################################################
cert_extensions = x509_cert.extensions
self.assertEqual(len(cert_extensions._extensions), 9)
# BASIC_CONSTRAINTS
basic_constraints_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.BasicConstraints)
self.assertEqual(basic_constraints_ext.critical, True)
self.assertEqual(basic_constraints_ext.value.ca, False)
self.assertIs(basic_constraints_ext.value.path_length, None)
# KEY_USAGE
key_usage_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.KeyUsage)
self.assertEqual(key_usage_ext.critical, True)
self.assertEqual(key_usage_ext.value.content_commitment, False)
self.assertEqual(key_usage_ext.value.crl_sign, False)
self.assertEqual(key_usage_ext.value.data_encipherment, False)
self.assertEqual(key_usage_ext.value.digital_signature, True)
self.assertEqual(key_usage_ext.value.key_agreement, False)
self.assertEqual(key_usage_ext.value.key_cert_sign, False)
self.assertEqual(key_usage_ext.value.key_encipherment, False)
# EXTENDED_KEY_USAGE
extended_key_usage_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.ExtendedKeyUsage)
self.assertEqual(extended_key_usage_ext.critical, False)
self.assertEqual(
extended_key_usage_ext.value._usages,
[oid.ExtendedKeyUsageOID.SERVER_AUTH])
# SUBJECT_ALTERNATIVE_NAME
subject_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectAlternativeName)
self.assertEqual(subject_alt_name_ext.critical, False)
self.assertEqual(len(subject_alt_name_ext.value._general_names._general_names), 67)
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].value,
'*.google.com')
# AUTHORITY_INFORMATION_ACCESS
authority_information_access_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityInformationAccess)
self.assertEqual(authority_information_access_ext.critical, False)
self.assertEqual(len(authority_information_access_ext.value._descriptions), 2)
# SUBJECT_KEY_IDENTIFIER
subject_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectKeyIdentifier)
self.assertEqual(subject_key_identifier_ext.critical, False)
self.assertEqual(
subject_key_identifier_ext.value.digest,
b'\xcf\x02\xda\x1aM\x80\x92\xff\x04E\xff\xcb7\x81\xe3O\x1d\x85\xb6\xb6')
# AUTHORITY_KEY_IDENTIFIER
authority_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityKeyIdentifier)
self.assertEqual(authority_key_identifier_ext.critical, False)
self.assertIs(authority_key_identifier_ext.value.authority_cert_issuer, None)
self.assertIs(authority_key_identifier_ext.value.authority_cert_serial_number, None)
self.assertEqual(
authority_key_identifier_ext.value.key_identifier,
b'w\xc2\xb8P\x9agvv\xb1-\xc2\x86\xd0\x83\xa0~\xa6~\xbaK'
)
# CERTIFICATE_POLICIES
certificate_policies_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CertificatePolicies)
self.assertEqual(certificate_policies_ext.critical, False)
self.assertSetEqual(
{policy_info.policy_identifier.dotted_string for policy_info in
certificate_policies_ext.value._policies},
{
# 'Google Trust Services'
# https://github.com/zmap/constants/blob/0816f6f/x509/certificate_policies.csv#L34
'1.3.6.1.4.1.11129.2.5.3',
# 'CA/B Forum Organization Validated'
# https://github.com/zmap/constants/blob/0816f6f/x509/certificate_policies.csv#L193
'2.23.140.1.2.2',
}
)
# CRL_DISTRIBUTION_POINTS
crl_distribution_points_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CRLDistributionPoints)
self.assertEqual(crl_distribution_points_ext.critical, False)
self.assertEqual(len(crl_distribution_points_ext.value._distribution_points), 1)
self.assertEqual(
crl_distribution_points_ext.value._distribution_points[0].full_name[0].value,
'http://crl.pki.goog/GTSGIAG3.crl')
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].crl_issuer, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].reasons, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].relative_name, None)
def test_load_der_x509_cert_ok_cert_real_dte_1(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.der')
x509_cert = load_der_x509_cert(cert_der_bytes)
self.assertIsInstance(x509_cert, X509Cert)
#######################################################################
# main properties
#######################################################################
self.assertEqual(
x509_cert.version,
cryptography.x509.Version.v3)
self.assertIsInstance(
x509_cert.signature_hash_algorithm,
cryptography.hazmat.primitives.hashes.SHA1)
self.assertEqual(
x509_cert.signature_algorithm_oid,
oid.SignatureAlgorithmOID.RSA_WITH_SHA1)
self.assertEqual(
x509_cert.serial_number,
232680798042554446173213)
self.assertEqual(
x509_cert.not_valid_after,
datetime(2020, 9, 3, 21, 11, 12))
self.assertEqual(
x509_cert.not_valid_before,
datetime(2017, 9, 4, 21, 11, 12))
#######################################################################
# issuer
#######################################################################
self.assertEqual(len(x509_cert.issuer.rdns), 7)
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.STATE_OR_PROVINCE_NAME)[0].value,
'Region Metropolitana')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.LOCALITY_NAME)[0].value,
'Santiago')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'E-CERTCHILE')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Autoridad Certificadora')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'E-CERTCHILE CA FIRMA ELECTRONICA SIMPLE')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.EMAIL_ADDRESS)[0].value,
'sclientes@e-certchile.cl')
#######################################################################
# subject
#######################################################################
self.assertEqual(len(x509_cert.subject.rdns), 7)
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.STATE_OR_PROVINCE_NAME)[0].value,
'VALPARAISO ')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.LOCALITY_NAME)[0].value,
'Quillota')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'Servicios Bonilla y Lopez y Cia. Ltda.')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Ingeniería y Construcción')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'Ramon humberto Lopez Jara')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.EMAIL_ADDRESS)[0].value,
'enaconltda@gmail.com')
#######################################################################
# extensions
#######################################################################
cert_extensions = x509_cert.extensions
self.assertEqual(len(cert_extensions._extensions), 9)
# KEY_USAGE
key_usage_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.KeyUsage)
self.assertEqual(key_usage_ext.critical, False)
self.assertEqual(key_usage_ext.value.content_commitment, True)
self.assertEqual(key_usage_ext.value.crl_sign, False)
self.assertEqual(key_usage_ext.value.data_encipherment, True)
self.assertEqual(key_usage_ext.value.digital_signature, True)
self.assertEqual(key_usage_ext.value.key_agreement, False)
self.assertEqual(key_usage_ext.value.key_cert_sign, False)
self.assertEqual(key_usage_ext.value.key_encipherment, True)
# ISSUER_ALTERNATIVE_NAME
issuer_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.IssuerAlternativeName)
self.assertEqual(issuer_alt_name_ext.critical, False)
self.assertEqual(len(issuer_alt_name_ext.value._general_names._general_names), 1)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].type_id,
_SII_CERT_CERTIFICADORA_EMISORA_RUT_OID)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\n96928180-5')
# SUBJECT_ALTERNATIVE_NAME
subject_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectAlternativeName)
self.assertEqual(subject_alt_name_ext.critical, False)
self.assertEqual(len(subject_alt_name_ext.value._general_names._general_names), 1)
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].type_id,
_SII_CERT_TITULAR_RUT_OID)
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\n13185095-6')
# AUTHORITY_INFORMATION_ACCESS
authority_information_access_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityInformationAccess)
self.assertEqual(authority_information_access_ext.critical, False)
self.assertEqual(len(authority_information_access_ext.value._descriptions), 1)
self.assertEqual(
authority_information_access_ext.value._descriptions[0].access_location.value,
'http://ocsp.ecertchile.cl/ocsp')
self.assertEqual(
authority_information_access_ext.value._descriptions[0].access_method,
oid.AuthorityInformationAccessOID.OCSP)
# SUBJECT_KEY_IDENTIFIER
subject_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectKeyIdentifier)
self.assertEqual(subject_key_identifier_ext.critical, False)
self.assertEqual(
subject_key_identifier_ext.value.digest,
a2b_hex('D5:D5:47:84:5D:14:55:EE:D1:5C:8C:F8:72:39:77:FD:57:B0:FA:AA'.replace(':', '')))
# AUTHORITY_KEY_IDENTIFIER
authority_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityKeyIdentifier)
self.assertEqual(authority_key_identifier_ext.critical, False)
self.assertIs(authority_key_identifier_ext.value.authority_cert_issuer, None)
self.assertIs(authority_key_identifier_ext.value.authority_cert_serial_number, None)
self.assertEqual(
authority_key_identifier_ext.value.key_identifier,
a2b_hex('78:E1:3E:9F:D2:12:B3:7A:3C:8D:CD:30:0E:53:B3:43:29:07:B3:55'.replace(':', '')))
# CERTIFICATE_POLICIES
certificate_policies_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CertificatePolicies)
self.assertEqual(certificate_policies_ext.critical, False)
self.assertEqual(len(certificate_policies_ext.value._policies), 1)
# note: parent of OID '1.3.6.1.4.1.8658.5' is '1.3.6.1.4.1.8658'
# ("Empresa Nacional de Certificacion Electronica").
# http://oidref.com/1.3.6.1.4.1.8658
# http://oid-info.com/get/1.3.6.1.4.1.8658
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_identifier,
oid.ObjectIdentifier("1.3.6.1.4.1.8658.5"))
self.assertEqual(len(certificate_policies_ext.value._policies[0].policy_qualifiers), 2)
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[0],
"http://www.e-certchile.cl/CPS.htm")
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[1],
cryptography.x509.extensions.UserNotice(
notice_reference=None,
explicit_text="Certificado Firma Simple. Ha sido validado en forma presencial, "
"quedando habilitado el Certificado para uso tributario"))
# CRL_DISTRIBUTION_POINTS
crl_distribution_points_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CRLDistributionPoints)
self.assertEqual(crl_distribution_points_ext.critical, False)
self.assertEqual(len(crl_distribution_points_ext.value._distribution_points), 1)
self.assertEqual(
crl_distribution_points_ext.value._distribution_points[0].full_name[0].value,
'http://crl.e-certchile.cl/ecertchilecaFES.crl')
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].crl_issuer, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].reasons, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].relative_name, None)
#######################################################################
# extra extensions
#######################################################################
# "Microsoft" / "Microsoft CertSrv Infrastructure" / "szOID_CERTIFICATE_TEMPLATE"
# See:
# http://oidref.com/1.3.6.1.4.1.311.21.7
# https://support.microsoft.com/en-ae/help/287547/object-ids-associated-with-microsoft-cryptography
some_microsoft_extension_oid = oid.ObjectIdentifier("1.3.6.1.4.1.311.21.7")
some_microsoft_ext = cert_extensions.get_extension_for_oid(some_microsoft_extension_oid)
self.assertEqual(some_microsoft_ext.critical, False)
self.assertTrue(isinstance(some_microsoft_ext.value.value, bytes))
def test_load_der_x509_cert_ok_cert_real_dte_3(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--60910000-1--33--2336600-cert.der')
x509_cert = load_der_x509_cert(cert_der_bytes)
self.assertIsInstance(x509_cert, X509Cert)
#######################################################################
# main properties
#######################################################################
self.assertEqual(
x509_cert.version,
cryptography.x509.Version.v3)
self.assertIsInstance(
x509_cert.signature_hash_algorithm,
cryptography.hazmat.primitives.hashes.SHA256)
self.assertEqual(
x509_cert.signature_algorithm_oid,
oid.SignatureAlgorithmOID.RSA_WITH_SHA256)
self.assertEqual(
x509_cert.serial_number,
6504844188525727926)
self.assertEqual(
x509_cert.not_valid_after,
datetime(2019, 9, 6, 21, 13, 0))
self.assertEqual(
x509_cert.not_valid_before,
datetime(2018, 9, 6, 21, 13, 0))
#######################################################################
# issuer
#######################################################################
self.assertEqual(len(x509_cert.issuer.rdns), 5)
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'E-Sign S.A.')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Terms of use at www.esign-la.com/acuerdoterceros')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'E-Sign Class 2 Firma Tributaria CA')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.EMAIL_ADDRESS)[0].value,
'e-sign@esign-la.com')
#######################################################################
# subject
#######################################################################
self.assertEqual(len(x509_cert.subject.rdns), 5)
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'E-Sign S.A.')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Terms of use at www.esign-la.com/acuerdoterceros')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'Jorge Enrique Cabello Ortiz')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.EMAIL_ADDRESS)[0].value,
'jcabello@nic.cl')
#######################################################################
# extensions
#######################################################################
cert_extensions = x509_cert.extensions
self.assertEqual(len(cert_extensions._extensions), 10)
# KEY_USAGE
key_usage_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.KeyUsage)
self.assertEqual(key_usage_ext.critical, True)
self.assertEqual(key_usage_ext.value.content_commitment, False)
self.assertEqual(key_usage_ext.value.crl_sign, False)
self.assertEqual(key_usage_ext.value.data_encipherment, False)
self.assertEqual(key_usage_ext.value.digital_signature, True)
self.assertEqual(key_usage_ext.value.key_agreement, False)
self.assertEqual(key_usage_ext.value.key_cert_sign, False)
self.assertEqual(key_usage_ext.value.key_encipherment, True)
# ISSUER_ALTERNATIVE_NAME
issuer_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.IssuerAlternativeName)
self.assertEqual(issuer_alt_name_ext.critical, False)
self.assertEqual(len(issuer_alt_name_ext.value._general_names._general_names), 1)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].type_id,
_SII_CERT_CERTIFICADORA_EMISORA_RUT_OID)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\n99551740-K')
# SUBJECT_ALTERNATIVE_NAME
subject_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectAlternativeName)
self.assertEqual(subject_alt_name_ext.critical, False)
self.assertEqual(len(subject_alt_name_ext.value._general_names._general_names), 1)
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].type_id,
_SII_CERT_TITULAR_RUT_OID)
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\t8480437-1')
# AUTHORITY_INFORMATION_ACCESS
authority_information_access_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityInformationAccess)
self.assertEqual(authority_information_access_ext.critical, False)
self.assertEqual(len(authority_information_access_ext.value._descriptions), 2)
self.assertEqual(
authority_information_access_ext.value._descriptions[0].access_location.value,
'http://pki.esign-la.com/cacerts/pkiClass2FirmaTributariaCA.crt')
self.assertEqual(
authority_information_access_ext.value._descriptions[0].access_method,
oid.AuthorityInformationAccessOID.CA_ISSUERS)
self.assertEqual(
authority_information_access_ext.value._descriptions[1].access_location.value,
'http://ocsp.esign-la.com')
self.assertEqual(
authority_information_access_ext.value._descriptions[1].access_method,
oid.AuthorityInformationAccessOID.OCSP)
# SUBJECT_KEY_IDENTIFIER
subject_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectKeyIdentifier)
self.assertEqual(subject_key_identifier_ext.critical, False)
self.assertEqual(
subject_key_identifier_ext.value.digest,
a2b_hex('E9:FE:44:7A:91:0A:F0:40:F2:9D:86:B4:E2:4C:F6:FA:1D:07:5B:C7'.replace(':', '')))
# AUTHORITY_KEY_IDENTIFIER
authority_key_identifier_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.AuthorityKeyIdentifier)
self.assertEqual(authority_key_identifier_ext.critical, False)
self.assertIs(authority_key_identifier_ext.value.authority_cert_issuer, None)
self.assertIs(authority_key_identifier_ext.value.authority_cert_serial_number, None)
self.assertEqual(
authority_key_identifier_ext.value.key_identifier,
a2b_hex('F9:4A:FA:C2:C7:6E:C2:E7:12:9C:57:45:35:84:1A:6D:28:E9:4A:A4'.replace(':', '')))
# CERTIFICATE_POLICIES
certificate_policies_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CertificatePolicies)
self.assertEqual(certificate_policies_ext.critical, False)
self.assertEqual(len(certificate_policies_ext.value._policies), 1)
# note: parent of OID '1.3.6.1.4.1.42346.1.4.1.2' is '1.3.6.1.4.1.42346' ("E-SIGN S.A.").
# http://oidref.com/1.3.6.1.4.1.42346
# http://oid-info.com/get/1.3.6.1.4.1.42346
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_identifier,
oid.ObjectIdentifier("1.3.6.1.4.1.42346.1.4.1.2"))
self.assertEqual(len(certificate_policies_ext.value._policies[0].policy_qualifiers), 2)
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[0],
cryptography.x509.extensions.UserNotice(
notice_reference=None,
explicit_text='Certificado para uso Tributario, Comercio, Pagos y Otros'))
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[1],
"http://www.esign-la.com/cps")
# CRL_DISTRIBUTION_POINTS
crl_distribution_points_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CRLDistributionPoints)
self.assertEqual(crl_distribution_points_ext.critical, False)
self.assertEqual(len(crl_distribution_points_ext.value._distribution_points), 1)
self.assertEqual(
crl_distribution_points_ext.value._distribution_points[0].full_name[0].value,
'http://pki.esign-la.com/crl/pkiClass2FirmaTributaria/enduser.crl')
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].crl_issuer, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].reasons, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].relative_name, None)
def test_load_der_x509_cert_ok_prueba_sii(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/prueba-sii-cert.der')
x509_cert = load_der_x509_cert(cert_der_bytes)
self.assertIsInstance(x509_cert, X509Cert)
#######################################################################
# main properties
#######################################################################
self.assertEqual(
x509_cert.version,
cryptography.x509.Version.v3)
self.assertIsInstance(
x509_cert.signature_hash_algorithm,
cryptography.hazmat.primitives.hashes.MD5)
self.assertEqual(
x509_cert.signature_algorithm_oid,
oid.SignatureAlgorithmOID.RSA_WITH_MD5)
self.assertEqual(
x509_cert.serial_number,
131466)
self.assertEqual(
x509_cert.not_valid_after,
datetime(2003, 10, 2, 0, 0))
self.assertEqual(
x509_cert.not_valid_before,
datetime(2002, 10, 2, 19, 11, 59))
#######################################################################
# issuer
#######################################################################
self.assertEqual(len(x509_cert.issuer.rdns), 6)
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.STATE_OR_PROVINCE_NAME)[0].value,
'Region Metropolitana')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.LOCALITY_NAME)[0].value,
'Santiago')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'E-CERTCHILE')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Empresa Nacional de Certificacion Electronica')
self.assertEqual(
x509_cert.issuer.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'E-Certchile CA Intermedia')
#######################################################################
# subject
#######################################################################
self.assertEqual(len(x509_cert.subject.rdns), 7)
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COUNTRY_NAME)[0].value,
'CL')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.STATE_OR_PROVINCE_NAME)[0].value,
'Region Metropolitana')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.LOCALITY_NAME)[0].value,
'Santiago')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATION_NAME)[0].value,
'Servicio de Impuestos Internos')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value,
'Servicio de Impuestos Internos')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)[0].value,
'Wilibaldo Gonzalez Cabrera')
self.assertEqual(
x509_cert.subject.get_attributes_for_oid(oid.NameOID.EMAIL_ADDRESS)[0].value,
'wgonzalez@sii.cl')
#######################################################################
# extensions
#######################################################################
cert_extensions = x509_cert.extensions
self.assertEqual(len(cert_extensions._extensions), 5)
# KEY_USAGE
key_usage_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.KeyUsage)
self.assertEqual(key_usage_ext.critical, False)
self.assertEqual(key_usage_ext.value.content_commitment, True)
self.assertEqual(key_usage_ext.value.crl_sign, False)
self.assertEqual(key_usage_ext.value.data_encipherment, True)
self.assertEqual(key_usage_ext.value.digital_signature, True)
self.assertEqual(key_usage_ext.value.key_agreement, False)
self.assertEqual(key_usage_ext.value.key_cert_sign, False)
self.assertEqual(key_usage_ext.value.key_encipherment, True)
# ISSUER_ALTERNATIVE_NAME
issuer_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.IssuerAlternativeName)
self.assertEqual(issuer_alt_name_ext.critical, False)
self.assertEqual(len(issuer_alt_name_ext.value._general_names._general_names), 1)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].type_id,
_SII_CERT_CERTIFICADORA_EMISORA_RUT_OID)
self.assertEqual(
issuer_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\n96928180-5')
# SUBJECT_ALTERNATIVE_NAME
subject_alt_name_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.SubjectAlternativeName)
self.assertEqual(subject_alt_name_ext.critical, False)
self.assertEqual(len(subject_alt_name_ext.value._general_names._general_names), 1)
# TODO: find out where OID '1.3.6.1.4.1.8658.1' came from.
# Shouldn't it have been equal to '_SII_CERT_TITULAR_RUT_OID'?
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].type_id,
oid.ObjectIdentifier("1.3.6.1.4.1.8658.1"))
self.assertEqual(
subject_alt_name_ext.value._general_names._general_names[0].value,
b'\x16\n07880442-4')
# CERTIFICATE_POLICIES
certificate_policies_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CertificatePolicies)
self.assertEqual(certificate_policies_ext.critical, False)
self.assertEqual(len(certificate_policies_ext.value._policies), 1)
# TODO: find out where OID '1.3.6.1.4.1.8658.0' came from.
# Perhaps it was '1.3.6.1.4.1.8658'?
# https://oidref.com/1.3.6.1.4.1.8658
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_identifier,
oid.ObjectIdentifier("1.3.6.1.4.1.8658.0"))
self.assertEqual(len(certificate_policies_ext.value._policies[0].policy_qualifiers), 2)
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[0],
"http://www.e-certchile.cl/politica/cps.htm")
self.assertEqual(
certificate_policies_ext.value._policies[0].policy_qualifiers[1].explicit_text,
"El titular ha sido validado en forma presencial, quedando habilitado el Certificado "
"para uso tributario, pagos, comercio u otros")
# CRL_DISTRIBUTION_POINTS
crl_distribution_points_ext = cert_extensions.get_extension_for_class(
cryptography.x509.extensions.CRLDistributionPoints)
self.assertEqual(crl_distribution_points_ext.critical, False)
self.assertEqual(len(crl_distribution_points_ext.value._distribution_points), 1)
self.assertEqual(
crl_distribution_points_ext.value._distribution_points[0].full_name[0].value,
'http://crl.e-certchile.cl/EcertchileCAI.crl')
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].crl_issuer, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].reasons, None)
self.assertIs(crl_distribution_points_ext.value._distribution_points[0].relative_name, None)
def test_load_der_x509_cert_fail_type_error(self) -> None:
with self.assertRaises(TypeError) as cm:
load_der_x509_cert(1)
self.assertEqual(cm.exception.args, ("Value must be bytes.", ))
def test_load_der_x509_cert_fail_value_error(self) -> None:
with self.assertRaises(ValueError) as cm:
load_der_x509_cert(b'hello')
self.assertEqual(
cm.exception.args,
("Unable to load certificate", ))
def test_load_pem_x509_cert_ok(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/crypto/wildcard-google-com-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/crypto/wildcard-google-com-cert.pem')
x509_cert_from_der = load_der_x509_cert(cert_der_bytes)
x509_cert_from_pem = load_pem_x509_cert(cert_pem_bytes)
self.assertIsInstance(x509_cert_from_pem, X509Cert)
self.assertEqual(x509_cert_from_der, x509_cert_from_pem)
def test_load_pem_x509_cert_ok_cert_real_dte_1(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.pem')
x509_cert_from_der = load_der_x509_cert(cert_der_bytes)
x509_cert_from_pem = load_pem_x509_cert(cert_pem_bytes)
self.assertIsInstance(x509_cert_from_pem, X509Cert)
self.assertEqual(x509_cert_from_der, x509_cert_from_pem)
def test_load_pem_x509_cert_ok_cert_real_dte_3(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--60910000-1--33--2336600-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--60910000-1--33--2336600-cert.pem')
x509_cert_from_der = load_der_x509_cert(cert_der_bytes)
x509_cert_from_pem = load_pem_x509_cert(cert_pem_bytes)
self.assertIsInstance(x509_cert_from_pem, X509Cert)
self.assertEqual(x509_cert_from_der, x509_cert_from_pem)
def test_load_pem_x509_cert_ok_prueba_sii(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/prueba-sii-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/prueba-sii-cert.pem')
x509_cert_from_der = load_der_x509_cert(cert_der_bytes)
x509_cert_from_pem = load_pem_x509_cert(cert_pem_bytes)
self.assertIsInstance(x509_cert_from_pem, X509Cert)
self.assertEqual(x509_cert_from_der, x509_cert_from_pem)
def test_load_pem_x509_cert_ok_str_ascii(self) -> None:
cert_pem_str_ascii = utils.read_test_file_str_ascii(
'test_data/crypto/wildcard-google-com-cert.pem')
x509_cert = load_pem_x509_cert(cert_pem_str_ascii)
self.assertIsInstance(x509_cert, X509Cert)
def test_load_pem_x509_cert_ok_str_utf8(self) -> None:
cert_pem_str_utf8 = utils.read_test_file_str_utf8(
'test_data/crypto/wildcard-google-com-cert.pem')
x509_cert = load_pem_x509_cert(cert_pem_str_utf8)
self.assertIsInstance(x509_cert, X509Cert)
def test_load_pem_x509_cert_fail_type_error(self) -> None:
with self.assertRaises(TypeError) as cm:
load_pem_x509_cert(1)
self.assertEqual(cm.exception.args, ("Value must be str or bytes.", ))
def test_load_pem_x509_cert_fail_value_error(self) -> None:
with self.assertRaises(ValueError) as cm:
load_pem_x509_cert('hello')
self.assertEqual(
cm.exception.args,
("Unable to load certificate. See "
"https://cryptography.io/en/latest/faq.html#why-can-t-i-import-my-pem-file "
"for more details.", ))
def test_x509_cert_der_to_pem_pem_to_der_ok_1(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/crypto/wildcard-google-com-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/crypto/wildcard-google-com-cert.pem')
# note: we test the function with a double call because the input PEM data
# may have different line lengths and different line separators.
self.assertEqual(
x509_cert_pem_to_der(x509_cert_der_to_pem(cert_der_bytes)),
x509_cert_pem_to_der(cert_pem_bytes))
def test_x509_cert_der_to_pem_pem_to_der_ok_cert_real_dte_1(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--76354771-K--33--170-cert.pem')
# note: we test the function with a double call because the input PEM data
# may have different line lengths and different line separators.
self.assertEqual(
x509_cert_pem_to_der(x509_cert_der_to_pem(cert_der_bytes)),
x509_cert_pem_to_der(cert_pem_bytes))
def test_x509_cert_der_to_pem_pem_to_der_ok_cert_real_dte_3(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--60910000-1--33--2336600-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/DTE--60910000-1--33--2336600-cert.pem')
# note: we test the function with a double call because the input PEM data
# may have different line lengths and different line separators.
self.assertEqual(
x509_cert_pem_to_der(x509_cert_der_to_pem(cert_der_bytes)),
x509_cert_pem_to_der(cert_pem_bytes))
def test_x509_cert_der_to_pem_pem_to_der_ok_prueba_sii(self) -> None:
cert_der_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/prueba-sii-cert.der')
cert_pem_bytes = utils.read_test_file_bytes(
'test_data/sii-crypto/prueba-sii-cert.pem')
# note: we test the function with a double call because the input PEM data
# may have different line lengths and different line separators.
self.assertEqual(
x509_cert_pem_to_der(x509_cert_der_to_pem(cert_der_bytes)),
x509_cert_pem_to_der(cert_pem_bytes))
def test_x509_cert_der_to_pem_type_error(self) -> None:
with self.assertRaises(TypeError) as cm:
x509_cert_der_to_pem(1)
self.assertEqual(cm.exception.args, ("Value must be bytes.", ))
def test_x509_cert_pem_to_der_type_error(self) -> None:
with self.assertRaises(TypeError) as cm:
x509_cert_pem_to_der(1)
self.assertEqual(cm.exception.args, ("Value must be bytes.", ))
def test_x509_cert_pem_to_der_value_error(self) -> None:
with self.assertRaises(ValueError) as cm:
x509_cert_pem_to_der(b'hello')
self.assertEqual(
cm.exception.args,
(
"Input is not a valid base64 value.",
"Invalid base64-encoded string: number of data characters (5) cannot be 1 more "
"than a multiple of 4",
))
def test_add_pem_cert_header_footer(self) -> None:
# TODO: implement for 'add_pem_cert_header_footer'
pass
def test_remove_pem_cert_header_footer(self) -> None:
# TODO: implement for 'remove_pem_cert_header_footer'
pass
| 48.496767
| 109
| 0.653039
| 5,326
| 45,005
| 5.169546
| 0.101202
| 0.114953
| 0.050376
| 0.060981
| 0.899793
| 0.88443
| 0.873279
| 0.866923
| 0.852359
| 0.838775
| 0
| 0.047732
| 0.206777
| 45,005
| 927
| 110
| 48.549083
| 0.723522
| 0.073303
| 0
| 0.762887
| 0
| 0.011782
| 0.092275
| 0.040002
| 0
| 0
| 0
| 0.001079
| 0.372607
| 1
| 0.036819
| false
| 0.005891
| 0.013255
| 0
| 0.053019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
672026d23f633e90bfb45b3f23b117bbc3d2519d
| 22,842
|
py
|
Python
|
nndet/io/augmentation/bg_aug.py
|
Gyanachand1/nnDetection
|
07f22a0af719a491002aa5088ab8b0e64eeda27e
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T03:19:59.000Z
|
2021-06-08T03:19:59.000Z
|
nndet/io/augmentation/bg_aug.py
|
Gyanachand1/nnDetection
|
07f22a0af719a491002aa5088ab8b0e64eeda27e
|
[
"BSD-3-Clause"
] | null | null | null |
nndet/io/augmentation/bg_aug.py
|
Gyanachand1/nnDetection
|
07f22a0af719a491002aa5088ab8b0e64eeda27e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Sequence, List
from loguru import logger
from nndet.io.augmentation.base import AugmentationSetup, get_patch_size
from batchgenerators.transforms import (
DataChannelSelectionTransform,
SegChannelSelectionTransform,
SpatialTransform,
GammaTransform,
MirrorTransform,
Compose,
BrightnessMultiplicativeTransform,
ContrastAugmentationTransform,
GaussianNoiseTransform,
GaussianBlurTransform,
SimulateLowResolutionTransform,
RenameTransform,
NumpyToTensor,
CenterCropTransform,
)
from batchgenerators.transforms.color_transforms import BrightnessTransform
from batchgenerators.transforms.utility_transforms import RemoveLabelTransform
from nnunet.training.data_augmentation.custom_transforms import (
Convert3DTo2DTransform,
Convert2DTo3DTransform,
MaskTransform,
)
from nndet.io.augmentation import AUGMENTATION_REGISTRY
@AUGMENTATION_REGISTRY.register
class NoAug(AugmentationSetup):
def __init__(self, patch_size: Sequence[int], params: dict) -> None:
super().__init__(patch_size, params)
self.dummy_2d = self.params.get("dummy_2D", False)
if self.dummy_2d:
    logger.info("Running dummy 2d augmentation transforms!")
    self._spatial_transform_patch_size = self.patch_size[1:]
else:
    self._spatial_transform_patch_size = self.patch_size
def get_patch_size_generator(self) -> List[int]:
"""
Compute patch size to extract from volume to avoid augmentation
artifacts
"""
_patch_size = list(get_patch_size(
patch_size=self._spatial_transform_patch_size,
rot_x=self.params['rotation_x'],
rot_y=self.params['rotation_y'],
rot_z=self.params['rotation_z'],
scale_range=self.params['scale_range'],
))
if self.dummy_2d:
_patch_size = [self.patch_size[0]] + _patch_size
return _patch_size
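# Rationale (a reading of the code above): rotation and scaling sample voxels
# from outside the final patch, so a larger region must be extracted first;
# get_patch_size enlarges the spatial dims accordingly, and in dummy-2D mode
# the leading (z) dimension is re-attached unchanged, since no in-plane
# transform touches it.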
def get_training_transforms(self):
tr_transforms = []
if self.params.get("selected_data_channels"):
tr_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
tr_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
tr_transforms.append(CenterCropTransform(self.patch_size))
tr_transforms.append(RemoveLabelTransform(-1, 0))
tr_transforms.append(RenameTransform('seg', 'target', True))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(tr_transforms)
def get_validation_transforms(self):
val_transforms = []
if self.params.get("selected_data_channels"):
val_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
val_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
val_transforms.append(CenterCropTransform(self.patch_size))
val_transforms.append(RemoveLabelTransform(-1, 0))
val_transforms.append(RenameTransform('seg', 'target', True))
val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(val_transforms)
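# Hedged usage sketch (the parameter values here are assumptions, shown as
# comments only):
# aug = NoAug(patch_size=(96, 160, 160), params=augmentation_params)
# generator_patch_size = aug.get_patch_size_generator()
# train_pipeline = aug.get_training_transforms()  # a Compose of transforms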
@AUGMENTATION_REGISTRY.register
class DefaultAug(NoAug):
def get_training_transforms(self):
assert self.params.get('mirror') is None, "old version of params, use new keyword do_mirror"
tr_transforms = []
if self.params.get("selected_data_channels"):
tr_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
tr_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
if self.params.get("dummy_2D", False):
# don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
tr_transforms.append(Convert3DTo2DTransform())
tr_transforms.append(SpatialTransform(
self._spatial_transform_patch_size,
patch_center_dist_from_border=None,
do_elastic_deform=self.params.get("do_elastic"),
alpha=self.params.get("elastic_deform_alpha"),
sigma=self.params.get("elastic_deform_sigma"),
do_rotation=self.params.get("do_rotation"),
angle_x=self.params.get("rotation_x"),
angle_y=self.params.get("rotation_y"),
angle_z=self.params.get("rotation_z"),
do_scale=self.params.get("do_scaling"),
scale=self.params.get("scale_range"),
order_data=self.params.get("order_data"),
border_mode_data=self.params.get("border_mode_data"),
border_cval_data=self.params.get("border_cval_data"),
order_seg=self.params.get("order_seg"),
border_mode_seg=self.params.get("border_mode_seg"),
border_cval_seg=self.params.get("border_cval_seg"),
random_crop=self.params.get("random_crop"),
p_el_per_sample=self.params.get("p_eldef"),
p_scale_per_sample=self.params.get("p_scale"),
p_rot_per_sample=self.params.get("p_rot"),
independent_scale_for_each_axis=self.params.get("independent_scale_factor_for_each_axis"),
))
if self.params.get("dummy_2D", False):
tr_transforms.append(Convert2DTo3DTransform())
if self.params.get("do_gamma", False):
tr_transforms.append(
GammaTransform(self.params.get("gamma_range"), False, True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=self.params["p_gamma"])
)
if self.params.get("do_mirror", False):
tr_transforms.append(MirrorTransform(self.params.get("mirror_axes")))
if self.params.get("use_mask_for_norm"):
use_mask_for_norm = self.params.get("use_mask_for_norm")
tr_transforms.append(MaskTransform(use_mask_for_norm, mask_idx_in_seg=0, set_outside_to=0))
tr_transforms.append(RemoveLabelTransform(-1, 0))
tr_transforms.append(RenameTransform('seg', 'target', True))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(tr_transforms)
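# BaseMoreAug below repeats DefaultAug's spatial pipeline and layers intensity
# augmentations on top (Gaussian noise/blur, multiplicative and optional
# additive brightness, contrast, and an always-on inverted-gamma pass). Note
# that its 'ignore_axes' variable is computed but unused, since it has no
# SimulateLowResolutionTransform.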
@AUGMENTATION_REGISTRY.register
class BaseMoreAug(NoAug):
def get_training_transforms(self):
assert self.params.get('mirror') is None, "old version of params, use new keyword do_mirror"
tr_transforms = []
if self.params.get("selected_data_channels"):
tr_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
tr_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
# don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
if self.params.get("dummy_2D", False):
ignore_axes = (0,)
tr_transforms.append(Convert3DTo2DTransform())
else:
ignore_axes = None
tr_transforms.append(SpatialTransform(
self._spatial_transform_patch_size,
patch_center_dist_from_border=None,
do_elastic_deform=self.params.get("do_elastic"),
alpha=self.params.get("elastic_deform_alpha"),
sigma=self.params.get("elastic_deform_sigma"),
do_rotation=self.params.get("do_rotation"),
angle_x=self.params.get("rotation_x"),
angle_y=self.params.get("rotation_y"),
angle_z=self.params.get("rotation_z"),
do_scale=self.params.get("do_scaling"),
scale=self.params.get("scale_range"),
order_data=self.params.get("order_data"),
border_mode_data=self.params.get("border_mode_data"),
border_cval_data=self.params.get("border_cval_data"),
order_seg=self.params.get("order_seg"),
border_mode_seg=self.params.get("border_mode_seg"),
border_cval_seg=self.params.get("border_cval_seg"),
random_crop=self.params.get("random_crop"),
p_el_per_sample=self.params.get("p_eldef"),
p_scale_per_sample=self.params.get("p_scale"),
p_rot_per_sample=self.params.get("p_rot"),
independent_scale_for_each_axis=self.params.get("independent_scale_factor_for_each_axis"),
))
if self.params.get("dummy_2D"):
tr_transforms.append(Convert2DTo3DTransform())
# we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
# channel gets in the way
tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
tr_transforms.append(GaussianBlurTransform((0.5, 1.),
different_sigma_per_channel=True,
p_per_sample=0.2,
p_per_channel=0.5))
tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25),
p_per_sample=0.15))
if self.params.get("do_additive_brightness"):
tr_transforms.append(BrightnessTransform(
self.params.get("additive_brightness_mu"),
self.params.get("additive_brightness_sigma"),
True,
p_per_sample=self.params.get("additive_brightness_p_per_sample"),
p_per_channel=self.params.get("additive_brightness_p_per_channel")))
tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"), True, True, retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=0.1)) # inverted gamma
if self.params.get("do_gamma"):
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"),
False,
True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=self.params["p_gamma"]))
if self.params.get("do_mirror") or self.params.get("mirror"):
tr_transforms.append(MirrorTransform(self.params.get("mirror_axes")))
if self.params.get("use_mask_for_norm"):
use_mask_for_norm = self.params.get("use_mask_for_norm")
tr_transforms.append(MaskTransform(use_mask_for_norm, mask_idx_in_seg=0, set_outside_to=0))
tr_transforms.append(RemoveLabelTransform(-1, 0))
tr_transforms.append(RenameTransform('seg', 'target', True))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(tr_transforms)
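# MoreAug below is BaseMoreAug's pipeline plus SimulateLowResolutionTransform,
# which finally consumes 'ignore_axes' so the dummy-2D z-axis is kept out of
# the simulated downsampling.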
@AUGMENTATION_REGISTRY.register
class MoreAug(NoAug):
def get_training_transforms(self):
assert self.params.get('mirror') is None, "old version of params, use new keyword do_mirror"
tr_transforms = []
if self.params.get("selected_data_channels"):
tr_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
tr_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
# don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
if self.params.get("dummy_2D", False):
ignore_axes = (0,)
tr_transforms.append(Convert3DTo2DTransform())
else:
ignore_axes = None
tr_transforms.append(SpatialTransform(
self._spatial_transform_patch_size,
patch_center_dist_from_border=None,
do_elastic_deform=self.params.get("do_elastic"),
alpha=self.params.get("elastic_deform_alpha"),
sigma=self.params.get("elastic_deform_sigma"),
do_rotation=self.params.get("do_rotation"),
angle_x=self.params.get("rotation_x"),
angle_y=self.params.get("rotation_y"),
angle_z=self.params.get("rotation_z"),
do_scale=self.params.get("do_scaling"),
scale=self.params.get("scale_range"),
order_data=self.params.get("order_data"),
border_mode_data=self.params.get("border_mode_data"),
border_cval_data=self.params.get("border_cval_data"),
order_seg=self.params.get("order_seg"),
border_mode_seg=self.params.get("border_mode_seg"),
border_cval_seg=self.params.get("border_cval_seg"),
random_crop=self.params.get("random_crop"),
p_el_per_sample=self.params.get("p_eldef"),
p_scale_per_sample=self.params.get("p_scale"),
p_rot_per_sample=self.params.get("p_rot"),
independent_scale_for_each_axis=self.params.get("independent_scale_factor_for_each_axis"),
))
if self.params.get("dummy_2D"):
tr_transforms.append(Convert2DTo3DTransform())
# we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
# channel gets in the way
tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
tr_transforms.append(GaussianBlurTransform((0.5, 1.),
different_sigma_per_channel=True,
p_per_sample=0.2,
p_per_channel=0.5))
tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25),
p_per_sample=0.15))
if self.params.get("do_additive_brightness"):
tr_transforms.append(BrightnessTransform(
self.params.get("additive_brightness_mu"),
self.params.get("additive_brightness_sigma"),
True,
p_per_sample=self.params.get("additive_brightness_p_per_sample"),
p_per_channel=self.params.get("additive_brightness_p_per_channel")))
tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1),
per_channel=True,
p_per_channel=0.5,
order_downsample=0,
order_upsample=3,
p_per_sample=0.25,
ignore_axes=ignore_axes,
))
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"),
True,
True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=0.1)) # inverted gamma
if self.params.get("do_gamma"):
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"),
False,
True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=self.params["p_gamma"]))
if self.params.get("do_mirror") or self.params.get("mirror"):
tr_transforms.append(MirrorTransform(self.params.get("mirror_axes")))
if self.params.get("use_mask_for_norm"):
use_mask_for_norm = self.params.get("use_mask_for_norm")
tr_transforms.append(MaskTransform(use_mask_for_norm,
mask_idx_in_seg=0,
set_outside_to=0))
tr_transforms.append(RemoveLabelTransform(-1, 0))
tr_transforms.append(RenameTransform('seg', 'target', True))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(tr_transforms)
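# InsaneAug below is MoreAug with stronger intensity settings: noise
# p_per_sample 0.15 (vs 0.1), blur sigma up to 1.5 (vs 1.0), brightness
# multiplier up to 1.3 (vs 1.25), an explicit contrast_range of (0.65, 1.5),
# and inverted-gamma p_per_sample 0.15 (vs 0.1).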
@AUGMENTATION_REGISTRY.register
class InsaneAug(NoAug):
def get_training_transforms(self):
assert self.params.get('mirror') is None, "old version of params, use new keyword do_mirror"
tr_transforms = []
if self.params.get("selected_data_channels"):
tr_transforms.append(DataChannelSelectionTransform(
self.params.get("selected_data_channels")))
if self.params.get("selected_seg_channels"):
tr_transforms.append(SegChannelSelectionTransform(
self.params.get("selected_seg_channels")))
# don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
if self.params.get("dummy_2D", False):
ignore_axes = (0,)
tr_transforms.append(Convert3DTo2DTransform())
else:
ignore_axes = None
tr_transforms.append(SpatialTransform(
self._spatial_transform_patch_size,
patch_center_dist_from_border=None,
do_elastic_deform=self.params.get("do_elastic"),
alpha=self.params.get("elastic_deform_alpha"),
sigma=self.params.get("elastic_deform_sigma"),
do_rotation=self.params.get("do_rotation"),
angle_x=self.params.get("rotation_x"),
angle_y=self.params.get("rotation_y"),
angle_z=self.params.get("rotation_z"),
do_scale=self.params.get("do_scaling"),
scale=self.params.get("scale_range"),
order_data=self.params.get("order_data"),
border_mode_data=self.params.get("border_mode_data"),
border_cval_data=self.params.get("border_cval_data"),
order_seg=self.params.get("order_seg"),
border_mode_seg=self.params.get("border_mode_seg"),
border_cval_seg=self.params.get("border_cval_seg"),
random_crop=self.params.get("random_crop"),
p_el_per_sample=self.params.get("p_eldef"),
p_scale_per_sample=self.params.get("p_scale"),
p_rot_per_sample=self.params.get("p_rot"),
independent_scale_for_each_axis=self.params.get("independent_scale_factor_for_each_axis"),
))
if self.params.get("dummy_2D"):
tr_transforms.append(Convert2DTo3DTransform())
# we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
# channel gets in the way
tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15))
tr_transforms.append(GaussianBlurTransform((0.5, 1.5),
different_sigma_per_channel=True,
p_per_sample=0.2,
p_per_channel=0.5),
)
tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.3),
p_per_sample=0.15))
if self.params.get("do_additive_brightness"):
tr_transforms.append(BrightnessTransform(
self.params.get("additive_brightness_mu"),
self.params.get("additive_brightness_sigma"),
True,
p_per_sample=self.params.get("additive_brightness_p_per_sample"),
p_per_channel=self.params.get("additive_brightness_p_per_channel")))
tr_transforms.append(ContrastAugmentationTransform(contrast_range=(0.65, 1.5),
p_per_sample=0.15))
tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1),
per_channel=True,
p_per_channel=0.5,
order_downsample=0,
order_upsample=3,
p_per_sample=0.25,
ignore_axes=ignore_axes),
)
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"),
True,
True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=0.15)) # inverted gamma
if self.params.get("do_gamma"):
tr_transforms.append(GammaTransform(
self.params.get("gamma_range"),
False,
True,
retain_stats=self.params.get("gamma_retain_stats"),
p_per_sample=self.params["p_gamma"]))
if self.params.get("do_mirror") or self.params.get("mirror"):
tr_transforms.append(MirrorTransform(self.params.get("mirror_axes")))
if self.params.get("use_mask_for_norm"):
use_mask_for_norm = self.params.get("use_mask_for_norm")
tr_transforms.append(MaskTransform(use_mask_for_norm,
mask_idx_in_seg=0,
set_outside_to=0))
tr_transforms.append(RemoveLabelTransform(-1, 0))
tr_transforms.append(RenameTransform('seg', 'target', True))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
return Compose(tr_transforms)
| 46.903491
| 120
| 0.617021
| 2,547
| 22,842
| 5.238712
| 0.091873
| 0.132654
| 0.164656
| 0.039346
| 0.855879
| 0.843663
| 0.832047
| 0.82785
| 0.812411
| 0.803717
| 0
| 0.010639
| 0.283994
| 22,842
| 486
| 121
| 47
| 0.805197
| 0.068383
| 0
| 0.81039
| 0
| 0
| 0.140044
| 0.050403
| 0
| 0
| 0
| 0
| 0.01039
| 1
| 0.020779
| false
| 0
| 0.020779
| 0
| 0.072727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
675891f1fe8ec5a3477548d881fd306eba0ccd65
| 2,049
|
py
|
Python
|
tests/utils/test_commands_util.py
|
bondiolipietro/pipocoin-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | 1
|
2021-08-05T23:18:35.000Z
|
2021-08-05T23:18:35.000Z
|
tests/utils/test_commands_util.py
|
bondiolipietro/pipocoin-twitter-bot-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | null | null | null |
tests/utils/test_commands_util.py
|
bondiolipietro/pipocoin-twitter-bot-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | null | null | null |
import pytest
from pipocoin.utils import commands_util
@pytest.mark.parametrize(
"test_input,expected",
[
("$pipo test - command command_ _command_- test test",
["command", "command_", "_command_"]),
("$pipo test -command __ command- test",
["command", "__", "command"]),
("$pipo test -command-",
["command"]),
("$pipo test - transfer 5,0 pipo - test",
["transfer", "5,0", "pipo"]),
("$pipo test - transfer 5.0 pipo - test",
["transfer", "5.0", "pipo"]),
],
)
def test_get_command_stack(test_input, expected):
assert commands_util.get_command_stack(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("- command command_ _command_-",
["command", "command_", "_command_"]),
("-command __ command-",
["command", "__", "command"]),
("-command-",
["command"]),
],
)
def test_get_command_stack_from_command_string(test_input, expected):
assert commands_util.get_command_stack_from_command_string(
test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("$pipo test - command command_ _command_- test test",
"- command command_ _command_-"),
("$pipo test -command __ command- test",
"-command __ command-"),
("$pipo test -command-",
"-command-"),
],
)
def test_get_command_string_from_status(test_input, expected):
assert commands_util.get_command_string_from_status(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
(["", "test", "0", 8, "", "test"],
["test", "0", 8, "test"]),
(["", "command", "", "", "command_", "", "_command_", ""],
["command", "command_", "_command_"]),
],
)
def test_filter_empty_commands_from_stack(test_input, expected):
assert commands_util.filter_empty_commands_from_stack(
test_input) == expected
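# Hedged sketch (added; hypothetical, not the real pipocoin implementation):
# one way to satisfy the parametrized cases above. The command stack is
# whatever sits between the first and last "-" of the status text, split on
# whitespace, with empty tokens dropped.
def get_command_stack_sketch(status):
    command_string = status[status.find("-"):status.rfind("-") + 1]
    return [token for token in command_string.strip("-").split() if token]

# e.g. get_command_stack_sketch("$pipo test - transfer 5,0 pipo - test")
# -> ["transfer", "5,0", "pipo"]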
| 30.58209
| 79
| 0.580771
| 204
| 2,049
| 5.406863
| 0.127451
| 0.38078
| 0.342702
| 0.304624
| 0.939257
| 0.93291
| 0.917498
| 0.896646
| 0.685403
| 0.435177
| 0
| 0.007864
| 0.255246
| 2,049
| 66
| 80
| 31.045455
| 0.714941
| 0
| 0
| 0.465517
| 0
| 0
| 0.331869
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67bdcfc7c35b8269501baa02c53409a55b1a7382
| 22,404
|
py
|
Python
|
fsdviz/tests/integration_tests/test_permission_elements.py
|
AdamCottrill/fsdivz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | null | null | null |
fsdviz/tests/integration_tests/test_permission_elements.py
|
AdamCottrill/fsdivz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | 6
|
2020-02-12T00:03:40.000Z
|
2020-11-30T01:20:56.000Z
|
fsdviz/tests/integration_tests/test_permission_elements.py
|
AdamCottrill/fsdviz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | null | null | null |
"""=============================================================
~/fsdviz/tests/integration_tests/test_permission_elements.py
Created: 18 Nov 2020 14:30:57
DESCRIPTION:
This test file verifies that elements that are presented
conditionally, depending on the permissions/roles associated with the
user, are shown or hidden appropriately.
There are currently 4 types of authenticated user (plus
anonymous users):
+ GREAT_LAKES_COORDINATOR = "glsc"
+ AGENCY_MANAGER = "am"
+ AGENCY_COORDINATOR = "asc"
+ AGENCY_USER = "au"
agency Manager and agency user currently have the same permissions
+ The Great Lakes Stocking Coordinators are essentially admins and
should be able to:
+ access the admin page
+ CRUD any stocking event
+ CRUD their bookmarks
+ upload events
+ download stocking data
+ am and asc should be able to:
+ NOT access the admin page
+ create, update or delete only the stocking event
associated with their agency and lake(s)
+ upload events
+ CRUD their bookmarks
+ download stocking data
+ agency user should:
+ NOT access the admin page
+ NOT be able to create, update, or delete any stocking events
+ NOT upload events
+ CRUD their bookmarks
+ download stocking data
+ anonymous user should:
+ NOT access the admin page
+ NOT be able to create, update, or delete any stocking events
+ NOT upload events
+ NOT be able to create bookmarks
+ download stocking data(?)
There are two types of object that need to be protected - stocking
events and stocking event uploads.
- affected views:
# group and object level:
stocking.event_detail
stocking.edit_stocking_event
stocking.DataUploadEventListView
stocking.DataUploadEventDetailView
# permissions (group):
stocking.data_uploads
stocking.xls_events
TODO
- create a template tag and verify that buttons and links are
conditionally rendered in templates
- create integration tests and verify that users' permissions are
implemented correctly with GET and POST requests
A. Cottrill
=============================================================
"""
import pytest
from django.urls import reverse
from pytest_django.asserts import assertTemplateUsed, assertTemplateNotUsed
from fsdviz.tests.pytest_fixtures import (
user,
glsc,
huron_mdnr_sc,
huron_mdnr_user,
mdnr,
huron,
superior,
usfws,
data_uploads,
stocking_events,
)
# =============================
# ADMIN LINK and URLS
# the django admin should only be visible if the user "is_staff"
@pytest.mark.django_db
def test_admin_links_for_admin_user(client, admin_user):
"""an user who has been identifeid as an django admin_user should have
the link to the admin page available to them in the navbar.
"""
login = client.login(email=admin_user.email, password="password")
assert login is True
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
url = reverse("admin:index")
assert url in content
response = client.get(url)
content = str(response.content)
assert "Site administration" in content
@pytest.mark.django_db
def test_admin_links_for_user(client, user):
"""an user who has NOT been identifeid as a django admin user should
Not have the link to the admin page available to them in the
navbar, or be able to access the admin page.
"""
login = client.login(email=user.email, password="Abcd1234")
assert login is True
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
url = reverse("admin:index")
assert url not in content
response = client.get(url, follow=True)
assertTemplateUsed(response, "admin/login.html")
@pytest.mark.django_db
def test_admin_links_for_anon_user(client):
"""an anonymous user who should
Not have the link to the admin page available to them in the
navbar, or be able to access the admin page.
"""
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
url = reverse("admin:index")
assert url not in content
response = client.get(url, follow=True)
assertTemplateUsed(response, "admin/login.html")
# =============================
# DATA UPLOAD LINKS
# the links to data upload list and upload data should only be
# presented if the user is a stocking coordinator; they should not be
# visible to agency users or anonymous users
@pytest.mark.django_db
def test_upload_links_for_gl_coordinators(client, glsc):
"""a user who is identified as a great lakes stocking coordinator
should have both a link to the data upload list and a link to upload new
data."""
login = client.login(email=glsc.email, password="Abcd1234")
assert login is True
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
urls = [
reverse("stocking:data-upload-event-list"),
reverse("stocking:upload-stocking-events"),
]
for url in urls:
assert url in content
@pytest.mark.django_db
def test_upload_links_for_agency_coordinators(client, huron_mdnr_sc):
"""a user who is identified as an agency stocking coordinator
should have both a link to the data upload list and a link to upload new
data."""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
urls = [
reverse("stocking:data-upload-event-list"),
reverse("stocking:upload-stocking-events"),
]
for url in urls:
assert url in content
@pytest.mark.django_db
def test_upload_links_for_agency_user(client, huron_mdnr_user):
"""a user who is identified as an agency user should NOT have links to
either the data upload list or a link to upload new data.
"""
login = client.login(email=huron_mdnr_user.email, password="Abcd1234")
assert login is True
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
urls = [
reverse("stocking:data-upload-event-list"),
reverse("stocking:upload-stocking-events"),
]
for url in urls:
assert url not in content
@pytest.mark.django_db
def test_upload_links_not_included_for_anon_user(client):
"""an anonymous user should NOT have links to either the data upload list
or a link to upload new data.
"""
url = reverse("stocking:stocking-events-year", kwargs={"year": 2010})
response = client.get(url)
content = str(response.content)
urls = [
reverse("stocking:data-upload-event-list"),
reverse("stocking:upload-stocking-events"),
]
for url in urls:
assert url not in content
# =================================
# UPLOAD EVENT LIST ACCESS
@pytest.mark.django_db
def test_upload_list_accessible_to_gl_coordinators(client, glsc, data_uploads):
"""A user with the role of great lakes stocking coordinator should be
able to access the list of upload events, and it should include all
agencies and all lakes.
"""
login = client.login(email=glsc.email, password="Abcd1234")
assert login is True
url = reverse("stocking:data-upload-event-list")
response = client.get(url)
content = str(response.content)
for upload in data_uploads:
assert upload.slug in content
@pytest.mark.django_db
def test_upload_list_accessible_for_agency_coordinators(
client, huron_mdnr_sc, data_uploads
):
"""A user with the role of agency stocking coordinator should be able
to access the list of upload events, but it should only include
upload events that are associated with their lake(s) and agency.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
url = reverse("stocking:data-upload-event-list")
response = client.get(url)
content = str(response.content)
assert data_uploads[0].slug in content
assert data_uploads[1].slug not in content
assert data_uploads[2].slug not in content
assert data_uploads[3].slug not in content
@pytest.mark.django_db
def test_upload_list_not_accessible_to_agency_user(
client, huron_mdnr_user, data_uploads, stocking_events
):
"""A user with the role of agency stocking user should NOT be able to
access the list of upload events and should be redirected to the default
homepage if they try.
"""
login = client.login(email=huron_mdnr_user.email, password="Abcd1234")
assert login is True
url = reverse("stocking:data-upload-event-list")
response = client.get(url, follow=True)
assertTemplateUsed(response, "stocking/event_piechart_map.html")
content = str(response.content)
# none of the upload event slugs should be in our response
for upload in data_uploads:
assert upload.slug not in content
@pytest.mark.django_db
def test_upload_list_not_accessible_to_anon_user(client, data_uploads):
"""An anonymous user should NOT be able to
access the list of upload events and should be redirected to the
login page if they try.
"""
url = reverse("stocking:data-upload-event-list")
response = client.get(url, follow=True)
assertTemplateUsed(response, "registration/login.html")
content = str(response.content)
# none of the upload event slugs should be in our response
for upload in data_uploads:
assert upload.slug not in content
# =================================
# UPLOAD EVENT DETAIL ACCESS
@pytest.mark.django_db
def test_upload_detail_accessible_to_gl_coordinators(client, glsc, data_uploads):
"""A user with the role of great lakes stocking coordinator should be
able to access the detail of upload events, and it should include all
agencies and all lakes.
"""
login = client.login(email=glsc.email, password="Abcd1234")
assert login is True
url = reverse(
"stocking:data-upload-event-detail", kwargs={"slug": data_uploads[0].slug}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/upload_event_detail.html")
content = str(response.content)
assert "Data Upload Event Detail" in content
assert data_uploads[0].slug in content
@pytest.mark.django_db
def test_upload_detail_accessible_for_agency_coordinators(
client, huron_mdnr_sc, data_uploads
):
"""A user with the role of agency stocking coordinator should be able
to access the detail of upload events, but it should only include
upload events that are associated with their lake(s) and agency.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
url = reverse(
"stocking:data-upload-event-detail", kwargs={"slug": data_uploads[0].slug}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/upload_event_detail.html")
content = str(response.content)
assert "Data Upload Event Detail" in content
assert data_uploads[0].slug in content
@pytest.mark.django_db
def test_upload_detail_other_agency_not_accessible_for_agency_coordinators(
client, huron_mdnr_sc, data_uploads, stocking_events
):
"""A user with the role of agency stocking coordinator not should be able
to access the detail of upload events assoiciated with a different lake or agency.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
# event[0] is theirs, events 1-3 belong to someone else.
for upload in data_uploads[1:]:
url = reverse("stocking:data-upload-event-detail", kwargs={"slug": upload.slug})
response = client.get(url, follow=True)
assertTemplateUsed(response, "stocking/event_piechart_map.html")
content = str(response.content)
assert "Data Upload Event Detail" not in content
assert upload.slug not in content
@pytest.mark.django_db
def test_upload_detail_not_accessible_to_agency_user(
client, huron_mdnr_user, data_uploads, stocking_events
):
"""A user with the role of agency stocking user should NOT be able to
access the detail of upload events and should be redirected to their
homepage if they try.
"""
login = client.login(email=huron_mdnr_user.email, password="Abcd1234")
assert login is True
url = reverse(
"stocking:data-upload-event-detail", kwargs={"slug": data_uploads[0].slug}
)
response = client.get(url, follow=True)
assertTemplateUsed(response, "stocking/event_piechart_map.html")
content = str(response.content)
assert "Data Upload Event Detail" not in content
assert data_uploads[0].slug not in content
@pytest.mark.django_db
def test_upload_detail_not_accessible_for_anon_user(client, data_uploads):
"""An anonymous user should NOT be able to
access the detail of upload events and should be redirected to the
default homepage if they try.
"""
url = reverse(
"stocking:data-upload-event-detail", kwargs={"slug": data_uploads[0].slug}
)
response = client.get(url, follow=True)
assertTemplateUsed(response, "registration/login.html")
content = str(response.content)
assert "Data Upload Event Detail" not in content
assert data_uploads[0].slug not in content
# EVENT DETAIL - EDIT BUTTON
# the edit button should render on the detail page only if the
# user is logged in and has permission to edit that event.
@pytest.mark.django_db
def test_event_edit_button_rended_for_gl_coordinators(client, glsc, stocking_events):
"""A user with the role of great lakes stocking coordinator should see
the edit event button on every stocking event detail page.
"""
login = client.login(email=glsc.email, password="Abcd1234")
assert login is True
for event in stocking_events:
url = reverse(
"stocking:stocking-event-detail", kwargs={"stock_id": event.stock_id}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
edit_url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
assert edit_url in content
@pytest.mark.django_db
def test_event_edit_button_rended_for_agency_coordinators(
client, huron_mdnr_sc, stocking_events
):
"""A button or link to edit a stocking event should be included in the
response for a user who has the role of agency stocking coordinator for
the lake and agency associated with the event.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
event = stocking_events[0]
url = reverse("stocking:stocking-event-detail", kwargs={"stock_id": event.stock_id})
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
edit_url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
assert edit_url in content
@pytest.mark.django_db
def test_event_edit_button_not_rended_for_agency_coordinators(
client, huron_mdnr_sc, stocking_events
):
"""A button or link to edit a stocking event should NOT be included in the
response for a user who has the role of agency stocking coordinator for a DIFFERENT
lake or agency than the one associated with the event.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
# event[0] is ours, events[1-3] belong to someone else.
for event in stocking_events[1:]:
url = reverse(
"stocking:stocking-event-detail", kwargs={"stock_id": event.stock_id}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
edit_url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
assert edit_url not in content
@pytest.mark.django_db
def test_event_edit_button_not_rended_for_agency_user(
client, huron_mdnr_user, stocking_events
):
"""A button or link to edit a stocking event should NOT be included
in the response for a user who has the role of agency user, regardless
of the lake or agency associated with the event.
"""
login = client.login(email=huron_mdnr_user.email, password="Abcd1234")
assert login is True
for event in stocking_events:
url = reverse(
"stocking:stocking-event-detail", kwargs={"stock_id": event.stock_id}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
edit_url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
assert edit_url not in content
@pytest.mark.django_db
def test_event_edit_button_not_rended_for_anon_user(client, stocking_events):
"""
A button or link to edit a stocking event should NOT be included
in the response for an anonymous user
"""
for event in stocking_events:
url = reverse(
"stocking:stocking-event-detail", kwargs={"stock_id": event.stock_id}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
edit_url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
assert edit_url not in content
# EDIT STOCKING EVENT FORM
# urls accessible/not accessible
# the edit event detail url should only accessible if the
# user is logged in and has permission to edit that event.
@pytest.mark.django_db
def test_edit_stocking_event_accessible_for_gl_coordinators(
client, glsc, stocking_events
):
"""A user with the role of great lakes stocking coordinator should be able to access
the edit form for any stocking event.
"""
login = client.login(email=glsc.email, password="Abcd1234")
assert login is True
for event in stocking_events:
url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_event_form.html")
content = str(response.content)
assert "Edit Stocking Event {}".format(event.stock_id) in content
@pytest.mark.django_db
def test_edit_stocking_event_accessible_for_agency_coordinators(
client, huron_mdnr_sc, stocking_events
):
"""An agency stocking coordinator should be able to access the edit
form for a stocking event by their agency in the lake they are
responsible for.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
event = stocking_events[0]
url = reverse("stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id})
response = client.get(url)
assertTemplateUsed(response, "stocking/stocking_event_form.html")
content = str(response.content)
assert "Edit Stocking Event {}".format(event.stock_id) in content
@pytest.mark.django_db
def test_edit_stocking_event_form_not_accessible_for_agency_coordinators(
client, huron_mdnr_sc, stocking_events
):
"""An agency stocking coordinator NOT should be able to access the
edit form for a stocking events conducted by other their agencies
or in lakes they are not responsible for.
"""
login = client.login(email=huron_mdnr_sc.email, password="Abcd1234")
assert login is True
# event[0] is ours, events[1-3] belong to someone else.
for event in stocking_events[1:]:
url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
response = client.get(url, follow=True)
assertTemplateNotUsed(response, "stocking/stocking_event_form.html")
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
assert "Edit Stocking Event {}".format(event.stock_id) not in content
@pytest.mark.django_db
def test_edit_stocking_event_not_accessible_for_agency_user(
client, huron_mdnr_user, stocking_events
):
"""The edit stocking event form should not be accessible to regular
agency users; if they do try to access the URL, they will be
redirected to the detail page for that stocking event.
"""
login = client.login(email=huron_mdnr_user.email, password="Abcd1234")
assert login is True
for event in stocking_events:
url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
response = client.get(url, follow=True)
assertTemplateNotUsed(response, "stocking/stocking_event_form.html")
assertTemplateUsed(response, "stocking/stocking_detail.html")
content = str(response.content)
assert "Edit Stocking Event {}".format(event.stock_id) not in content
@pytest.mark.django_db
def test_edit_stocking_event_not_accessible_for_anon_user(client, stocking_events):
"""
The edit stocking event form should not be accessible to anonymous
users; if they do try to access the URL, they will be
redirected to the login page.
"""
for event in stocking_events:
url = reverse(
"stocking:edit-stocking-event", kwargs={"stock_id": event.stock_id}
)
response = client.get(url, follow=True)
assertTemplateNotUsed(response, "stocking/stocking_event_form.html")
assertTemplateUsed(response, "registration/login.html")
content = str(response.content)
assert "Edit Stocking Event {}".format(event.stock_id) not in content
| 32.754386
| 88
| 0.698938
| 3,034
| 22,404
| 5.030323
| 0.074819
| 0.040034
| 0.036561
| 0.038003
| 0.855327
| 0.843074
| 0.838095
| 0.824925
| 0.80802
| 0.785349
| 0
| 0.008061
| 0.208177
| 22,404
| 683
| 89
| 32.802343
| 0.852255
| 0.325478
| 0
| 0.794798
| 0
| 0
| 0.164821
| 0.120932
| 0
| 0
| 0
| 0.001464
| 0.225434
| 1
| 0.075145
| false
| 0.057803
| 0.011561
| 0
| 0.086705
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
67d25b9985ee5bce038a1e5c719f1fd405bbb82d
| 1,303
|
py
|
Python
|
semantic_admin/widgets/__init__.py
|
vrialland/django-semantic-admin
|
f1799e48ee36b411d22d08cae0c9b686707f543f
|
[
"MIT"
] | null | null | null |
semantic_admin/widgets/__init__.py
|
vrialland/django-semantic-admin
|
f1799e48ee36b411d22d08cae0c9b686707f543f
|
[
"MIT"
] | null | null | null |
semantic_admin/widgets/__init__.py
|
vrialland/django-semantic-admin
|
f1799e48ee36b411d22d08cae0c9b686707f543f
|
[
"MIT"
] | null | null | null |
from .admin import SemanticActionCheckboxInput
from .autocomplete import SemanticAutocompleteSelect, SemanticAutocompleteSelectMultiple
from .widgets import (
SemanticCheckboxInput,
SemanticCheckboxSelectMultiple,
SemanticClearableFileInput,
SemanticDateInput,
SemanticDateRangeWidget,
SemanticDateTimeInput,
SemanticEmailInput,
SemanticFileInput,
SemanticImageInput,
SemanticNumberInput,
SemanticPasswordInput,
SemanticRadioSelect,
SemanticSelect,
SemanticSelectMultiple,
SemanticTextarea,
SemanticTextInput,
SemanticTimeInput,
SemanticTimeRangeWidget,
SemanticURLInput,
)
__all__ = [
"SemanticActionCheckboxInput",
"SemanticAutocompleteSelect",
"SemanticAutocompleteSelectMultiple",
"SemanticCheckboxInput",
"SemanticCheckboxSelectMultiple",
"SemanticClearableFileInput",
"SemanticDateInput",
"SemanticDateRangeWidget",
"SemanticDateTimeInput",
"SemanticEmailInput",
"SemanticFileInput",
"SemanticImageInput",
"SemanticNumberInput",
"SemanticPasswordInput",
"SemanticRadioSelect",
"SemanticSelect",
"SemanticSelectMultiple",
"SemanticTextarea",
"SemanticTextInput",
"SemanticTimeInput",
"SemanticTimeRangeWidget",
"SemanticURLInput",
]
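# Hedged usage sketch (added): wiring two of the re-exported widgets into a
# plain Django form. Assumes the Semantic* widgets are drop-in replacements
# for the corresponding django.forms widgets and accept no required arguments.
from django import forms

class ExampleEventForm(forms.Form):
    start = forms.DateField(widget=SemanticDateInput())
    notes = forms.CharField(widget=SemanticTextarea())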
| 26.591837
| 88
| 0.755948
| 54
| 1,303
| 18.166667
| 0.518519
| 0.122324
| 0.156983
| 0.191641
| 0.764526
| 0.764526
| 0.764526
| 0.764526
| 0.764526
| 0.764526
| 0
| 0
| 0.171911
| 1,303
| 48
| 89
| 27.145833
| 0.909175
| 0
| 0
| 0
| 0
| 0
| 0.354566
| 0.210284
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.042553
| 0.06383
| 0
| 0.06383
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1da21ba661f214d8ca62f0bca5216419ad89667
| 63
|
py
|
Python
|
fastlmmhpc/util/__init__.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | 2
|
2019-12-10T09:55:40.000Z
|
2019-12-11T20:58:10.000Z
|
fastlmmhpc/util/__init__.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | null | null | null |
fastlmmhpc/util/__init__.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | null | null | null |
from fastlmmhpc.util.compute_auto_pcs import compute_auto_pcs
| 31.5
| 62
| 0.888889
| 10
| 63
| 5.2
| 0.7
| 0.423077
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079365
| 63
| 1
| 63
| 63
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c0195d8bd89863bd6df94e914911126e9b944f41
| 92
|
py
|
Python
|
parameters_8000.py
|
Querra/piraoke
|
9fd067dbcf55dd505c7825a0745c74bdbb5d1231
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
Querra/piraoke
|
9fd067dbcf55dd505c7825a0745c74bdbb5d1231
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
Querra/piraoke
|
9fd067dbcf55dd505c7825a0745c74bdbb5d1231
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$b904fa0653f28b91$20e6ee155b46b0132eaebd27656992ee7d3620b7"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.516484
| 0.01087
| 92
| 1
| 92
| 92
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c0239f25f04e4af9868326ad8ed2b2317ecc8533
| 122
|
py
|
Python
|
discord/member.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
discord/member.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
discord/member.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
from disnake.member import *
# Also copy private and dunder names that a bare star-import skips, so this
# module mirrors disnake.member exactly under the discord namespace.
from disnake.member import __dict__ as __original_dict__
locals().update(__original_dict__)
| 24.4
| 56
| 0.836066
| 16
| 122
| 5.5
| 0.5625
| 0.25
| 0.386364
| 0.522727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 122
| 4
| 57
| 30.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
222cf799090f34b48a48a405aa710187563d29fc
| 669
|
py
|
Python
|
python/testData/inspections/PyCompatibilityInspection/asyncAwait.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/asyncAwait.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/asyncAwait.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2019-02-06T14:50:03.000Z
|
2019-02-06T14:50:03.000Z
|
<warning descr="Python versions < 3.5 do not support this syntax">async</warning> def foo(x):
<warning descr="Python version 2.4 doesn't support this syntax."><warning descr="Python versions < 3.5 do not support this syntax">async</warning> with x:
y = <warning descr="Python versions < 3.5 do not support this syntax">await x</warning>
if <warning descr="Python versions < 3.5 do not support this syntax">await y</warning>:
return <warning descr="Python versions < 3.5 do not support this syntax">await z</warning></warning>
<warning descr="Python versions < 3.5 do not support this syntax">async</warning> for y in x:
pass
| 83.625
| 158
| 0.693572
| 106
| 669
| 4.377358
| 0.273585
| 0.181034
| 0.271552
| 0.336207
| 0.756466
| 0.756466
| 0.756466
| 0.756466
| 0.756466
| 0.756466
| 0
| 0.02583
| 0.189836
| 669
| 7
| 159
| 95.571429
| 0.830258
| 0
| 0
| 0
| 0
| 0
| 0.500747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.142857
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
222f51b5089d7b9598757032ad57ab7c9ff379e2
| 40,862
|
py
|
Python
|
pylayers/location/algebraic/hdf.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 143
|
2015-01-09T07:50:20.000Z
|
2022-03-02T11:26:53.000Z
|
pylayers/location/algebraic/hdf.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 148
|
2015-01-13T04:19:34.000Z
|
2022-03-11T23:48:25.000Z
|
pylayers/location/algebraic/hdf.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 95
|
2015-05-01T13:22:42.000Z
|
2022-03-15T11:22:28.000Z
|
from numpy import *
from scipy import *
from scipy import optimize
from numpy.linalg import *
#import cvxmod as cvxm # required by the SDPHDFLocate method below
#import cvxopt as cvxo
from string import *
from crlb import *
from rss import *
from toa import *
from tdoa import *
class HDFLocation(object):
"""
This class groups localization methods based on HDF of RSSI, ToA, and TDoA measurements
"""
def __init__(self, RN_RSS, RN_ToA, RN_TDoA):
self.RN_RSS = RN_RSS
self.RN_ToA = RN_ToA
self.RN_TDoA = RN_TDoA
def LSHDFLocate(self, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, TDoA, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This applies the LS (least squares) approximation to estimate the position P.
Returns P.
"""
c = 3e08
RSSL=RSSLocation(RN_RSS)
if RN_RSS==None:
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((K_ToA, K_TDoA))
A=vstack((A_ToA, A_TDoA))
P = 0.5*dot(linalg.inv(dot(A.T,A)),dot(A.T,K))
return P[:2,:]
elif RN_ToA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((K_RSS, K_TDoA))
A=vstack((A_RSS, A_TDoA))
P = 0.5*dot(linalg.inv(dot(A.T,A)),dot(A.T,K))
return P[:2,:]
elif RN_TDoA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0])
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0])
# solution
K=vstack((K_RSS, K_ToA))
A=vstack((A_RSS, A_ToA))
P = 0.5*dot(linalg.inv(dot(A.T,A)),dot(A.T,K))
return P
else:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_TDoA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((vstack((K_RSS,K_ToA)), K_TDoA))
A=vstack((vstack((A_RSS,A_ToA)), A_TDoA))
P = 0.5*dot(linalg.inv(dot(A.T,A)),dot(A.T,K))
return P[:2,:]
def TLSHDFLocate(self, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, TDoA, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This applies the TLS (total least squares) approximation, using a truncated-SVD
pseudo-inverse, to estimate the position P. Returns P.
"""
c = 3e08
RSSL=RSSLocation(RN_RSS)
if RN_RSS==None:
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((K_ToA, K_TDoA))
A=vstack((A_ToA, A_TDoA))
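# Truncated-SVD pseudo-inverse: invert A^T.A through its SVD and, when the
# system is ill-conditioned, zero the largest entries of J = 1/S (i.e. the
# reciprocals of the smallest singular values) before recomposing.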
A2 = dot(transpose(A),A)
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(A.T,K))
return P[:2,:]
elif RN_ToA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((K_RSS, K_TDoA))
A=vstack((A_RSS, A_TDoA))
A2 = dot(transpose(A),A)
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(A.T,K))
return P[:2,:]
elif RN_TDoA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0])
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0])
# solution
K=vstack((K_RSS, K_ToA))
A=vstack((A_RSS, A_ToA))
A2 = dot(transpose(A),A)
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(A.T,K))
return P
else:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_TDoA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
# solution
K=vstack((vstack((K_RSS,K_ToA)), K_TDoA))
A=vstack((vstack((A_RSS,A_ToA)), A_TDoA))
A2 = dot(transpose(A),A)
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(A.T,K))
return P[:2,:]
def WLSHDFLocate(self, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This applies the WLS (weighted least squares) approximation, weighting each
equation by its range variance, to estimate the position P. Returns P.
"""
c = 3e08
RSSL=RSSLocation(RN_RSS)
if RN_RSS==None:
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((K_ToA, K_TDoA))
A=vstack((A_ToA, A_TDoA))
C=diag(hstack((C_ToA, C_TDoA)))
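# Weighted LS solution: P = 0.5 * (A^T C^-1 A)^-1 A^T C^-1 K, with C the
# diagonal covariance built from the squared range standard deviations.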
P = 0.5*dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
elif RN_ToA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((K_RSS, K_TDoA))
A=vstack((A_RSS, A_TDoA))
C=diag(hstack((C_RSS, C_TDoA)))
P = 0.5*dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
elif RN_TDoA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# solution
K=vstack((K_RSS, K_ToA))
A=vstack((A_RSS, A_ToA))
C=diag(hstack((C_RSS, C_ToA)))
P = 0.5*dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),K))
return P
else:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_TDoA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((vstack((K_RSS,K_ToA)), K_TDoA))
A=vstack((vstack((A_RSS,A_ToA)), A_TDoA))
C=diag(hstack((hstack((C_RSS,C_ToA)), C_TDoA)))
P = 0.5*dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
def TWLSHDFLocate(self, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This applies the TWLS (total weighted least squares) approximation to estimate
the position P. Returns P.
"""
c = 3e08
RSSL=RSSLocation(RN_RSS)
if RN_RSS==None:
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((K_ToA, K_TDoA))
A=vstack((A_ToA, A_TDoA))
C=diag(hstack((C_ToA, C_TDoA)))
A2 = dot(A.T,dot(linalg.inv(C),A))
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
elif RN_ToA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((K_RSS, K_TDoA))
A=vstack((A_RSS, A_TDoA))
C=diag(hstack((C_RSS, C_TDoA)))
A2 = dot(A.T,dot(linalg.inv(C),A))
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
elif RN_TDoA==None:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# solution
K=vstack((K_RSS, K_ToA))
A=vstack((A_RSS, A_ToA))
C=diag(hstack((C_RSS, C_ToA)))
A2 = dot(A.T,dot(linalg.inv(C),A))
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
else:
# for RSS
shRN_RSS = shape(RN_RSS)
RNnum_RSS = shRN_RSS[1]
RN_RSS2 = (sum(RN_RSS*RN_RSS,axis=0)).reshape(RNnum_RSS,1)
RoA = RSSL.getRange(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest) # RSS based Ranges (meters)
RoAStd = RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)
RoA2 = (RoA*RoA).reshape(RNnum_RSS,1)
K_RSS = RN_RSS2[1:RNnum_RSS,:]-RN_RSS2[0,0] + RoA2[0,0]-RoA2[1:RNnum_RSS,:]
A_RSS = hstack((RN_RSS[:,1:RNnum_RSS].T - RN_RSS[:,0].reshape(1,shRN_RSS[0]), zeros((RNnum_RSS-1,1))))
C_RSS = RoAStd[1:RNnum_RSS,0]**2
# for ToA
shRN_ToA = shape(RN_ToA)
RNnum_ToA = shRN_ToA[1]
RN_ToA2 = (sum(RN_ToA*RN_ToA,axis=0)).reshape(RNnum_ToA,1)
RoA = c*ToA
RoAStd = c*ToAStd
RoA2 = (RoA*RoA).reshape(RNnum_ToA,1)
K_ToA = RN_ToA2[1:RNnum_ToA,:]-RN_ToA2[0,0] + RoA2[0,0]-RoA2[1:RNnum_ToA,:]
A_ToA = hstack((RN_ToA[:,1:RNnum_ToA].T - RN_ToA[:,0].reshape(1,shRN_ToA[0]), zeros((RNnum_ToA-1,1))))
C_ToA = RoAStd[1:RNnum_ToA,0]**2
# for TDoA
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_TDoA[:,0:1]*ones((1,RNnum_TDoA))
RDoA = c*TDoA
RDoAStd = c*TDoAStd
RDoA2 = (RDoA*RDoA).reshape(RNnum_TDoA,1)
K_TDoA = (sum((RN_TDoA-RN_TDoA2)*(RN_TDoA-RN_TDoA2),axis=0)).reshape(RNnum_TDoA,1)-RDoA2
A_TDoA = hstack((RN_TDoA.T - RN_TDoA2.T,0.5*RoA[0,0]*RDoA))
C_TDoA = RDoAStd[:,0]**2
# solution
K=vstack((vstack((K_RSS,K_ToA)), K_TDoA))
A=vstack((vstack((A_RSS,A_ToA)), A_TDoA))
C=diag(hstack((hstack((C_RSS,C_ToA)), C_TDoA)))
A2 = dot(A.T,dot(linalg.inv(C),A))
[U,S,V]=svd(A2)
J = 1/S
rA=linalg.matrix_rank(A) # numpy's plain rank() returns ndim, not the matrix rank
m,n=shape(A)
f=0
if log10(cond(A2))>=max(RSSL.getRangeStd(RN_RSS, PL0, d0, RSS, RSSnp, RSSStd, Rest)):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
P = 0.5*dot(A2i,dot(dot(A.T,linalg.inv(C)),K))
return P[:2,:]
def HDFOptimizer(self, P, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This evaluates the combined RSS/ToA/TDoA cost function at position P.
Returns the scalar cost fopt.
"""
RSSL=RSSLocation(RN_RSS)
TOAL=ToALocation(RN_ToA)
TDOAL=TDoALocation(RN_TDoA)
if RN_RSS==None:
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
fopt=TOAL.ToAOptimizer(P, RN_ToA, ToA, ToAStd) + TDOAL.TDoAOptimizer(P, RN_TDoA, RN_TDoA2, TDoA, TDoAStd)
return fopt
elif RN_ToA==None:
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
fopt=RSSL.DRSSOptimizer(P, RN_RSS, PL0, d0, RSS, RSSnp, RSSStd) + TDOAL.TDoAOptimizer(P, RN_TDoA, RN_TDoA2, TDoA, TDoAStd)
return fopt
elif RN_TDoA==None:
fopt=RSSL.DRSSOptimizer(P, RN_RSS, PL0, d0, RSS, RSSnp, RSSStd) + TOAL.ToAOptimizer(P, RN_ToA, ToA, ToAStd)
return fopt
else:
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_TDoA[:,0:1]*ones((1,RNnum_TDoA))
fopt=RSSL.DRSSOptimizer(P, RN_RSS, PL0, d0, RSS, RSSnp, RSSStd) + TOAL.ToAOptimizer(P, RN_ToA, ToA, ToAStd) + TDOAL.TDoAOptimizer(P, RN_TDoA, RN_TDoA2, TDoA, TDoAStd)
return fopt
def MLHDFLocate(self, P, P0, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This performs ML (maximum likelihood) estimation of the position P by
minimizing HDFOptimizer, starting from the initial guess P0. Returns P.
"""
P = optimize.fmin(self.HDFOptimizer,P0,args=(RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest),xtol=1e-10,ftol=1e-10)
return P.reshape(shape(P0))
def SDPHDFLocate(self, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, RSS, RSSnp, RSSStd, Rest):
"""
This applies an SDP (semidefinite programming) relaxation, solved with cvxmod,
to estimate the position P. Returns P.
"""
c = 3e08
if RN_RSS==None:
RN_ToA = cvxm.matrix(RN_ToA)
ToA = cvxm.matrix(ToA)
RoA = c*ToA
RoAStd = c*ToAStd
RoAStd = cvxm.matrix(RoAStd)
mtoa,ntoa=cvxm.size(RN_ToA)
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_ToA[:,0:1]*ones((1,RNnum_TDoA))
RN_TDoA = cvxm.matrix(RN_TDoA)
RN_TDoA2 = cvxm.matrix(RN_TDoA2)
TDoA = cvxm.matrix(TDoA)
RDoA = c*TDoA
RDoAStd=cvxm.matrix(c*TDoAStd)
mtdoa,ntdoa=cvxm.size(RN_TDoA)
Im = cvxm.eye(mtoa)
Y=cvxm.optvar('Y',mtoa+1,mtoa+1)
t=cvxm.optvar('t',ntoa+ntdoa,1)
prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
prob.constr.append(Y>=0)
prob.constr.append(Y[mtoa,mtoa]==1)
for i in range(ntoa):
X0=cvxm.matrix([[Im, -cvxm.transpose(RN_ToA[:,i])],[-RN_ToA[:,i], cvxm.transpose(RN_ToA[:,i])*RN_ToA[:,i]]])
prob.constr.append(-t[i]<(cvxm.trace(X0*Y)-RoA[i]**2)*(1/RoAStd[i]))
prob.constr.append(t[i]>(cvxm.trace(X0*Y)-RoA[i]**2)*(1/RoAStd[i]))
for i in range(ntdoa):
X0=cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA[:,i])],[-RN_TDoA[:,i], cvxm.transpose(RN_TDoA[:,i])*RN_TDoA[:,i]]])
X1=cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA2[:,i])],[-RN_TDoA2[:,i], cvxm.transpose(RN_TDoA2[:,i])*RN_TDoA2[:,i]]])
prob.constr.append(-RDoAStd[i,0]*t[i]<cvxm.trace((X1-X0)*Y)-RDoA[i,0]**2)
prob.constr.append(RDoAStd[i,0]*t[i]>cvxm.trace((X1-X0)*Y)-RDoA[i,0]**2)
prob.solve()
Pval=Y.value
X_cvx=Pval[:2,-1]
return X_cvx
elif RN_ToA==None:
RN_RSS=cvxm.matrix(RN_RSS)
RSS=cvxm.matrix(RSS.T)
RSSnp=cvxm.matrix(RSSnp.T)
RSSStd=cvxm.matrix(RSSStd.T)
PL0=cvxm.matrix(PL0)
mrss,nrss=cvxm.size(RN_RSS)
Si = array([(1/d0**2)*10**((RSS[0,0]-PL0[0,0])/(5.0*RSSnp[0,0])),(1/d0**2)*10**((RSS[0,1]-PL0[1,0])/(5.0*RSSnp[0,1])),(1/d0**2)*10**((RSS[0,2]-PL0[2,0])/(5.0*RSSnp[0,2])),(1/d0**2)*10**((RSS[0,3]-PL0[3,0])/(5.0*RSSnp[0,3]))])
shRN_TDoA = shape(RN_TDoA)
RNnum_TDoA = shRN_TDoA[1]
#RN_TDoA2= RN_RSS[:,0:1]*ones((1,RNnum_TDoA))
RN_TDoA = cvxm.matrix(RN_TDoA)
RN_TDoA2 = cvxm.matrix(RN_TDoA2)
TDoA = cvxm.matrix(TDoA)
RDoA = c*TDoA
RDoAStd=cvxm.matrix(c*TDoAStd)
mtdoa,ntdoa=cvxm.size(RN_TDoA)
Im = cvxm.eye(mrss)
Y=cvxm.optvar('Y',mrss+1,mrss+1)
t=cvxm.optvar('t',nrss+ntdoa,1)
prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
prob.constr.append(Y>=0)
prob.constr.append(Y[mrss,mrss]==1)
for i in range(nrss):
X0=cvxm.matrix([[Im, -cvxm.transpose(RN_RSS[:,i])],[-RN_RSS[:,i], cvxm.transpose(RN_RSS[:,i])*RN_RSS[:,i]]])
prob.constr.append(-RSSStd[0,i]*t[i]<Si[i]*cvxm.trace(X0*Y)-1)
prob.constr.append(RSSStd[0,i]*t[i]>Si[i]*cvxm.trace(X0*Y)-1)
for i in range(ntdoa):
X0=cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA[:,i])],[-RN_TDoA[:,i], cvxm.transpose(RN_TDoA[:,i])*RN_TDoA[:,i]]])
X1=cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA2[:,i])],[-RN_TDoA2[:,i], cvxm.transpose(RN_TDoA2[:,i])*RN_TDoA2[:,i]]])
prob.constr.append(-RDoAStd[i,0]*t[i]<cvxm.trace((X1-X0)*Y)-RDoA[i,0]**2)
prob.constr.append(RDoAStd[i,0]*t[i]>cvxm.trace((X1-X0)*Y)-RDoA[i,0]**2)
prob.solve()
Pval=Y.value
X_cvx=Pval[:2,-1]
return X_cvx
        elif RN_TDoA is None:
            # hybrid RSS + ToA
            RN_RSS = cvxm.matrix(RN_RSS)
            RSS = cvxm.matrix(RSS.T)
            RSSnp = cvxm.matrix(RSSnp.T)
            RSSStd = cvxm.matrix(RSSStd.T)
            PL0 = cvxm.matrix(PL0)
            mrss, nrss = cvxm.size(RN_RSS)
            # Si[i] = 1/d_i**2 from the log-distance path-loss model, one term per RSS anchor
            Si = array([(1 / d0 ** 2) * 10 ** ((RSS[0, i] - PL0[i, 0]) / (5.0 * RSSnp[0, i]))
                        for i in range(nrss)])
            RN_ToA = cvxm.matrix(RN_ToA)
            ToA = cvxm.matrix(ToA)
            RoA = c * ToA
            RoAStd = cvxm.matrix(c * ToAStd)
            mtoa, ntoa = cvxm.size(RN_ToA)
            Im = cvxm.eye(mrss)
            Y = cvxm.optvar('Y', mrss + 1, mrss + 1)
            t = cvxm.optvar('t', nrss + ntoa, 1)
            prob = cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
            prob.constr.append(Y >= 0)
            prob.constr.append(Y[mrss, mrss] == 1)
            for i in range(nrss):
                X0 = cvxm.matrix([[Im, -cvxm.transpose(RN_RSS[:, i])],
                                  [-RN_RSS[:, i], cvxm.transpose(RN_RSS[:, i]) * RN_RSS[:, i]]])
                prob.constr.append(-RSSStd[0, i] * t[i] < Si[i] * cvxm.trace(X0 * Y) - 1)
                prob.constr.append(RSSStd[0, i] * t[i] > Si[i] * cvxm.trace(X0 * Y) - 1)
            for i in range(ntoa):
                X0 = cvxm.matrix([[Im, -cvxm.transpose(RN_ToA[:, i])],
                                  [-RN_ToA[:, i], cvxm.transpose(RN_ToA[:, i]) * RN_ToA[:, i]]])
                # one slack entry per ToA measurement, offset past the nrss RSS slacks
                prob.constr.append(-t[nrss + i] < (cvxm.trace(X0 * Y) - RoA[i] ** 2) * (1 / RoAStd[i]))
                prob.constr.append(t[nrss + i] > (cvxm.trace(X0 * Y) - RoA[i] ** 2) * (1 / RoAStd[i]))
            prob.solve()
            Pval = Y.value
            X_cvx = Pval[:2, -1]
            return X_cvx
        else:
            # hybrid RSS + ToA + TDoA
            RN_RSS = cvxm.matrix(RN_RSS)
            RSS = cvxm.matrix(RSS.T)
            RSSnp = cvxm.matrix(RSSnp.T)
            RSSStd = cvxm.matrix(RSSStd.T)
            PL0 = cvxm.matrix(PL0)
            mrss, nrss = cvxm.size(RN_RSS)
            # Si[i] = 1/d_i**2 from the log-distance path-loss model, one term per RSS anchor
            Si = array([(1 / d0 ** 2) * 10 ** ((RSS[0, i] - PL0[i, 0]) / (5.0 * RSSnp[0, i]))
                        for i in range(nrss)])
            RN_ToA = cvxm.matrix(RN_ToA)
            ToA = cvxm.matrix(ToA)
            RoA = c * ToA
            RoAStd = cvxm.matrix(c * ToAStd)
            mtoa, ntoa = cvxm.size(RN_ToA)
            # RN_TDoA2 could default to RN_TDoA[:, 0:1] * ones((1, shape(RN_TDoA)[1]))
            RN_TDoA = cvxm.matrix(RN_TDoA)
            RN_TDoA2 = cvxm.matrix(RN_TDoA2)
            TDoA = cvxm.matrix(TDoA)
            RDoA = c * TDoA
            RDoAStd = cvxm.matrix(c * TDoAStd)
            mtdoa, ntdoa = cvxm.size(RN_TDoA)
            Im = cvxm.eye(mrss)
            Y = cvxm.optvar('Y', mrss + 1, mrss + 1)
            t = cvxm.optvar('t', nrss + ntoa + ntdoa, 1)
            prob = cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
            prob.constr.append(Y >= 0)
            prob.constr.append(Y[mrss, mrss] == 1)
            for i in range(nrss):
                X0 = cvxm.matrix([[Im, -cvxm.transpose(RN_RSS[:, i])],
                                  [-RN_RSS[:, i], cvxm.transpose(RN_RSS[:, i]) * RN_RSS[:, i]]])
                prob.constr.append(-RSSStd[0, i] * t[i] < Si[i] * cvxm.trace(X0 * Y) - 1)
                prob.constr.append(RSSStd[0, i] * t[i] > Si[i] * cvxm.trace(X0 * Y) - 1)
            for i in range(ntoa):
                X0 = cvxm.matrix([[Im, -cvxm.transpose(RN_ToA[:, i])],
                                  [-RN_ToA[:, i], cvxm.transpose(RN_ToA[:, i]) * RN_ToA[:, i]]])
                # ToA slacks sit after the nrss RSS slacks
                prob.constr.append(-t[nrss + i] < (cvxm.trace(X0 * Y) - RoA[i] ** 2) * (1 / RoAStd[i]))
                prob.constr.append(t[nrss + i] > (cvxm.trace(X0 * Y) - RoA[i] ** 2) * (1 / RoAStd[i]))
            for i in range(ntdoa):
                X0 = cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA[:, i])],
                                  [-RN_TDoA[:, i], cvxm.transpose(RN_TDoA[:, i]) * RN_TDoA[:, i]]])
                X1 = cvxm.matrix([[Im, -cvxm.transpose(RN_TDoA2[:, i])],
                                  [-RN_TDoA2[:, i], cvxm.transpose(RN_TDoA2[:, i]) * RN_TDoA2[:, i]]])
                # TDoA slacks sit after the RSS and ToA slacks
                prob.constr.append(-RDoAStd[i, 0] * t[nrss + ntoa + i] < cvxm.trace((X1 - X0) * Y) - RDoA[i, 0] ** 2)
                prob.constr.append(RDoAStd[i, 0] * t[nrss + ntoa + i] > cvxm.trace((X1 - X0) * Y) - RDoA[i, 0] ** 2)
            prob.solve()
            Pval = Y.value
            X_cvx = Pval[:2, -1]
            return X_cvx
    def CRBHDFLocate(self, P, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, ToAStd, TDoAStd, PL0, d0, RSSnp, RSSStd):
        """
        Compute the Cramer-Rao lower bound of a position estimator at
        position P for whichever combination of RSS/ToA/TDoA anchors is given.
        Return the square root of the CRB (a standard-deviation bound), not a position.
        """
        CRBL = CRBLocation(None)
        if RN_RSS is None:
            return sqrt(CRBL.CRB_TOA_TDOA_fim(P, RN_ToA, RN_TDoA, RN_TDoA2, ToAStd, TDoAStd))
        elif RN_ToA is None:
            return sqrt(CRBL.CRB_RSS_TDOA_fim(P, RN_RSS, RN_TDoA, RN_TDoA2, RSSnp, RSSStd, TDoAStd))
        elif RN_TDoA is None:
            return sqrt(CRBL.CRB_RSS_TOA_fim(P, RN_RSS, RN_ToA, RSSnp, RSSStd, ToAStd))
        else:
            return sqrt(CRBL.CRB_RSS_TOA_TDOA_fim(P, RN_RSS, RN_ToA, RN_TDoA, RN_TDoA2, RSSnp, RSSStd, ToAStd, TDoAStd))
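As a plausibility check for the SDP solver above, a ToA-only fix can also be obtained from a much simpler linearized least-squares formulation. The sketch below is illustrative only (plain NumPy, a hypothetical square anchor layout, noise-free ranges); it is not part of the original module and does not reproduce the SDP's per-measurement robust weighting.

import numpy as np

def toa_lls(RN, RoA):
    # Linearized least-squares ToA fix: subtract the range equation of
    # anchor 0 from every other anchor's equation to remove the |x|^2 term.
    A = 2.0 * (RN[:, 1:] - RN[:, :1]).T
    b = (RoA[0] ** 2 - RoA[1:] ** 2
         + np.sum(RN[:, 1:] ** 2, axis=0) - np.sum(RN[:, 0] ** 2))
    return np.linalg.lstsq(A, b, rcond=None)[0]

RN = np.array([[0.0, 10.0, 0.0, 10.0],
               [0.0, 0.0, 10.0, 10.0]])        # four hypothetical anchors
P_true = np.array([3.0, 4.0])
RoA = np.linalg.norm(RN - P_true[:, None], axis=0)  # noise-free ranges
print(toa_lls(RN, RoA))                        # ~ [3. 4.]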
| 41.191532 | 238 | 0.489991 | 6,070 | 40,862 | 3.118616 | 0.027677 | 0.038299 | 0.015214 | 0.028843 | 0.961807 | 0.960539 | 0.95552 | 0.954253 | 0.951611 | 0.950343 | 0 | 0.051503 | 0.350448 | 40,862 | 991 | 239 | 41.233098 | 0.661706 | 0.055773 | 0 | 0.933432 | 0 | 0 | 0.000215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013314 | false | 0 | 0.013314 | 0 | 0.071006 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
224a42ae9d16f657b6322884efef0b7fd568567c | 26 | py | Python | example/python2/test/scan/expr1.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | ["MIT"] | 43 | 2016-04-24T15:20:16.000Z | 2022-03-19T21:01:29.000Z | example/python2/test/scan/expr1.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | ["MIT"] | 11 | 2016-06-01T16:06:38.000Z | 2020-05-20T20:15:32.000Z | example/python2/test/scan/expr1.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | ["MIT"] | 12 | 2016-05-24T12:15:04.000Z | 2021-11-20T02:14:00.000Z |
(10.5 + 2 / 30) // 3 >> 1
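The file above is a single scanner test expression. Assuming the intent is to exercise the tokenizer on mixed-precedence arithmetic and shift operators, the token stream it should yield can be inspected with the standard tokenize module (a sketch, not part of the test suite):

import io
import tokenize

src = "(10.5 + 2 / 30) // 3 >> 1"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))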
| 13 | 25 | 0.307692 | 6 | 26 | 1.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.470588 | 0.346154 | 26 | 1 | 26 | 26 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
225495b1592093122a00be2e841d731e5ed8168a | 2,441 | py | Python | exams/61a-su20-mt/q6/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | 1 | 2022-01-22T11:45:01.000Z | 2022-01-22T11:45:01.000Z | exams/61a-su20-mt/q6/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | null | null | null | exams/61a-su20-mt/q6/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | null | null | null |
email = 'example_key'
def copycat(lst1, lst2):
"""
Write a function `copycat` that takes in two lists.
`lst1` is a list of strings
`lst2` is a list of integers
It returns a new list where every element from `lst1` is copied the
number of times as the corresponding element in `lst2`. If the number
of times to be copied is negative (-k), then it removes the previous
k elements added.
Note 1: `lst1` and `lst2` do not have to be the same length, simply ignore
any extra elements in the longer list.
Note 2: you can assume that you will never be asked to delete more
elements than exist
>>> copycat(['a', 'b', 'c'], [1, 2, 3])
['a', 'b', 'b', 'c', 'c', 'c']
>>> copycat(['a', 'b', 'c'], [3])
['a', 'a', 'a']
>>> copycat(['a', 'b', 'c'], [0, 2, 0])
['b', 'b']
>>> copycat([], [1,2,3])
[]
>>> copycat(['a', 'b', 'c'], [1, -1, 3])
['c', 'c', 'c']
"""
def copycat_helper(______, ______, ______):
if ______:
return ______
if ______:
______ = ______
else:
______ = ______[:______]
return ______
return ______
# ORIGINAL SKELETON FOLLOWS
# def copycat(lst1, lst2):
# """
# Write a function `copycat` that takes in two lists.
# `lst1` is a list of strings
# `lst2` is a list of integers
# It returns a new list where every element from `lst1` is copied the
# number of times as the corresponding element in `lst2`. If the number
# of times to be copied is negative (-k), then it removes the previous
# k elements added.
# Note 1: `lst1` and `lst2` do not have to be the same length, simply ignore
# any extra elements in the longer list.
# Note 2: you can assume that you will never be asked to delete more
# elements than exist
# >>> copycat(['a', 'b', 'c'], [1, 2, 3])
# ['a', 'b', 'b', 'c', 'c', 'c']
# >>> copycat(['a', 'b', 'c'], [3])
# ['a', 'a', 'a']
# >>> copycat(['a', 'b', 'c'], [0, 2, 0])
# ['b', 'b']
# >>> copycat([], [1,2,3])
# []
# >>> copycat(['a', 'b', 'c'], [1, -1, 3])
# ['c', 'c', 'c']
# """
# def copycat_helper(______, ______, ______):
# if ______:
# return ______
# if ______:
# ______ = ______
# else:
# ______ = ______[:______]
# return ______
# return ______
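For reference, one possible completion of the skeleton above (an illustrative solution, not the official 61A answer key) that satisfies all five doctests:

def copycat_solution(lst1, lst2):
    def copycat_helper(i, result):
        # stop once either list is exhausted; extra elements are ignored
        if i >= len(lst1) or i >= len(lst2):
            return result
        if lst2[i] >= 0:
            result = result + [lst1[i]] * lst2[i]
        else:
            result = result[:lst2[i]]  # a negative count -k drops the last k added elements
        return copycat_helper(i + 1, result)
    return copycat_helper(0, [])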
| 29.768293 | 80 | 0.525604 | 322 | 2,441 | 3.490683 | 0.23913 | 0.017794 | 0.064057 | 0.071174 | 0.966192 | 0.966192 | 0.966192 | 0.966192 | 0.966192 | 0.966192 | 0 | 0.027155 | 0.306022 | 2,441 | 81 | 81 | 30.135802 | 0.636364 | 0.814011 | 0 | 0.454545 | 0 | 0 | 0.033846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3f137c27492c898756a1c4af42de1b5aa33e488d | 47 | py | Python | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-xmzzzz | 2c86b3991e7df66b976fdcf5eb47c5ffbf3b8b85 | ["MIT"] | null | null | null | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-xmzzzz | 2c86b3991e7df66b976fdcf5eb47c5ffbf3b8b85 | ["MIT"] | 1 | 2018-10-09T20:12:22.000Z | 2018-10-09T20:12:22.000Z | test_example.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-xmzzzz | 2c86b3991e7df66b976fdcf5eb47c5ffbf3b8b85 | ["MIT"] | null | null | null |
def test_example():
return 0
test_example()
| 9.4 | 19 | 0.723404 | 7 | 47 | 4.571429 | 0.714286 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.170213 | 47 | 4 | 20 | 11.75 | 0.794872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0 | 0.333333 | 0.666667 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 8 |
3f3af8b3d03d0f98993846005584d516c765ee15 | 76 | py | Python | src/model/__init__.py | ssaru/pytorch-XNOR-YOLO | 2c02429d6ee052fe70a17ced4755d5814a4ef60a | ["MIT"] | 1 | 2021-05-22T09:29:44.000Z | 2021-05-22T09:29:44.000Z | src/model/__init__.py | ssaru/pytorch-XNOR-YOLO | 2c02429d6ee052fe70a17ced4755d5814a4ef60a | ["MIT"] | null | null | null | src/model/__init__.py | ssaru/pytorch-XNOR-YOLO | 2c02429d6ee052fe70a17ced4755d5814a4ef60a | ["MIT"] | null | null | null |
from src.model.xnor_yolo import XnorNetYolo
from src.model.yolo import Yolo
| 25.333333 | 43 | 0.842105 | 13 | 76 | 4.846154 | 0.538462 | 0.222222 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 76 | 2 | 44 | 38 | 0.926471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
3f50d0851f25920744ca2246906b3a18cd95f799 | 6,545 | py | Python | loldib/getratings/models/NA/na_aatrox/na_aatrox_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null | loldib/getratings/models/NA/na_aatrox/na_aatrox_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null | loldib/getratings/models/NA/na_aatrox/na_aatrox_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Aatrox_Top_Aatrox(Ratings):
pass
class NA_Aatrox_Top_Ahri(Ratings):
pass
class NA_Aatrox_Top_Akali(Ratings):
pass
class NA_Aatrox_Top_Alistar(Ratings):
pass
class NA_Aatrox_Top_Amumu(Ratings):
pass
class NA_Aatrox_Top_Anivia(Ratings):
pass
class NA_Aatrox_Top_Annie(Ratings):
pass
class NA_Aatrox_Top_Ashe(Ratings):
pass
class NA_Aatrox_Top_AurelionSol(Ratings):
pass
class NA_Aatrox_Top_Azir(Ratings):
pass
class NA_Aatrox_Top_Bard(Ratings):
pass
class NA_Aatrox_Top_Blitzcrank(Ratings):
pass
class NA_Aatrox_Top_Brand(Ratings):
pass
class NA_Aatrox_Top_Braum(Ratings):
pass
class NA_Aatrox_Top_Caitlyn(Ratings):
pass
class NA_Aatrox_Top_Camille(Ratings):
pass
class NA_Aatrox_Top_Cassiopeia(Ratings):
pass
class NA_Aatrox_Top_Chogath(Ratings):
pass
class NA_Aatrox_Top_Corki(Ratings):
pass
class NA_Aatrox_Top_Darius(Ratings):
pass
class NA_Aatrox_Top_Diana(Ratings):
pass
class NA_Aatrox_Top_Draven(Ratings):
pass
class NA_Aatrox_Top_DrMundo(Ratings):
pass
class NA_Aatrox_Top_Ekko(Ratings):
pass
class NA_Aatrox_Top_Elise(Ratings):
pass
class NA_Aatrox_Top_Evelynn(Ratings):
pass
class NA_Aatrox_Top_Ezreal(Ratings):
pass
class NA_Aatrox_Top_Fiddlesticks(Ratings):
pass
class NA_Aatrox_Top_Fiora(Ratings):
pass
class NA_Aatrox_Top_Fizz(Ratings):
pass
class NA_Aatrox_Top_Galio(Ratings):
pass
class NA_Aatrox_Top_Gangplank(Ratings):
pass
class NA_Aatrox_Top_Garen(Ratings):
pass
class NA_Aatrox_Top_Gnar(Ratings):
pass
class NA_Aatrox_Top_Gragas(Ratings):
pass
class NA_Aatrox_Top_Graves(Ratings):
pass
class NA_Aatrox_Top_Hecarim(Ratings):
pass
class NA_Aatrox_Top_Heimerdinger(Ratings):
pass
class NA_Aatrox_Top_Illaoi(Ratings):
pass
class NA_Aatrox_Top_Irelia(Ratings):
pass
class NA_Aatrox_Top_Ivern(Ratings):
pass
class NA_Aatrox_Top_Janna(Ratings):
pass
class NA_Aatrox_Top_JarvanIV(Ratings):
pass
class NA_Aatrox_Top_Jax(Ratings):
pass
class NA_Aatrox_Top_Jayce(Ratings):
pass
class NA_Aatrox_Top_Jhin(Ratings):
pass
class NA_Aatrox_Top_Jinx(Ratings):
pass
class NA_Aatrox_Top_Kalista(Ratings):
pass
class NA_Aatrox_Top_Karma(Ratings):
pass
class NA_Aatrox_Top_Karthus(Ratings):
pass
class NA_Aatrox_Top_Kassadin(Ratings):
pass
class NA_Aatrox_Top_Katarina(Ratings):
pass
class NA_Aatrox_Top_Kayle(Ratings):
pass
class NA_Aatrox_Top_Kayn(Ratings):
pass
class NA_Aatrox_Top_Kennen(Ratings):
pass
class NA_Aatrox_Top_Khazix(Ratings):
pass
class NA_Aatrox_Top_Kindred(Ratings):
pass
class NA_Aatrox_Top_Kled(Ratings):
pass
class NA_Aatrox_Top_KogMaw(Ratings):
pass
class NA_Aatrox_Top_Leblanc(Ratings):
pass
class NA_Aatrox_Top_LeeSin(Ratings):
pass
class NA_Aatrox_Top_Leona(Ratings):
pass
class NA_Aatrox_Top_Lissandra(Ratings):
pass
class NA_Aatrox_Top_Lucian(Ratings):
pass
class NA_Aatrox_Top_Lulu(Ratings):
pass
class NA_Aatrox_Top_Lux(Ratings):
pass
class NA_Aatrox_Top_Malphite(Ratings):
pass
class NA_Aatrox_Top_Malzahar(Ratings):
pass
class NA_Aatrox_Top_Maokai(Ratings):
pass
class NA_Aatrox_Top_MasterYi(Ratings):
pass
class NA_Aatrox_Top_MissFortune(Ratings):
pass
class NA_Aatrox_Top_MonkeyKing(Ratings):
pass
class NA_Aatrox_Top_Mordekaiser(Ratings):
pass
class NA_Aatrox_Top_Morgana(Ratings):
pass
class NA_Aatrox_Top_Nami(Ratings):
pass
class NA_Aatrox_Top_Nasus(Ratings):
pass
class NA_Aatrox_Top_Nautilus(Ratings):
pass
class NA_Aatrox_Top_Nidalee(Ratings):
pass
class NA_Aatrox_Top_Nocturne(Ratings):
pass
class NA_Aatrox_Top_Nunu(Ratings):
pass
class NA_Aatrox_Top_Olaf(Ratings):
pass
class NA_Aatrox_Top_Orianna(Ratings):
pass
class NA_Aatrox_Top_Ornn(Ratings):
pass
class NA_Aatrox_Top_Pantheon(Ratings):
pass
class NA_Aatrox_Top_Poppy(Ratings):
pass
class NA_Aatrox_Top_Quinn(Ratings):
pass
class NA_Aatrox_Top_Rakan(Ratings):
pass
class NA_Aatrox_Top_Rammus(Ratings):
pass
class NA_Aatrox_Top_RekSai(Ratings):
pass
class NA_Aatrox_Top_Renekton(Ratings):
pass
class NA_Aatrox_Top_Rengar(Ratings):
pass
class NA_Aatrox_Top_Riven(Ratings):
pass
class NA_Aatrox_Top_Rumble(Ratings):
pass
class NA_Aatrox_Top_Ryze(Ratings):
pass
class NA_Aatrox_Top_Sejuani(Ratings):
pass
class NA_Aatrox_Top_Shaco(Ratings):
pass
class NA_Aatrox_Top_Shen(Ratings):
pass
class NA_Aatrox_Top_Shyvana(Ratings):
pass
class NA_Aatrox_Top_Singed(Ratings):
pass
class NA_Aatrox_Top_Sion(Ratings):
pass
class NA_Aatrox_Top_Sivir(Ratings):
pass
class NA_Aatrox_Top_Skarner(Ratings):
pass
class NA_Aatrox_Top_Sona(Ratings):
pass
class NA_Aatrox_Top_Soraka(Ratings):
pass
class NA_Aatrox_Top_Swain(Ratings):
pass
class NA_Aatrox_Top_Syndra(Ratings):
pass
class NA_Aatrox_Top_TahmKench(Ratings):
pass
class NA_Aatrox_Top_Taliyah(Ratings):
pass
class NA_Aatrox_Top_Talon(Ratings):
pass
class NA_Aatrox_Top_Taric(Ratings):
pass
class NA_Aatrox_Top_Teemo(Ratings):
pass
class NA_Aatrox_Top_Thresh(Ratings):
pass
class NA_Aatrox_Top_Tristana(Ratings):
pass
class NA_Aatrox_Top_Trundle(Ratings):
pass
class NA_Aatrox_Top_Tryndamere(Ratings):
pass
class NA_Aatrox_Top_TwistedFate(Ratings):
pass
class NA_Aatrox_Top_Twitch(Ratings):
pass
class NA_Aatrox_Top_Udyr(Ratings):
pass
class NA_Aatrox_Top_Urgot(Ratings):
pass
class NA_Aatrox_Top_Varus(Ratings):
pass
class NA_Aatrox_Top_Vayne(Ratings):
pass
class NA_Aatrox_Top_Veigar(Ratings):
pass
class NA_Aatrox_Top_Velkoz(Ratings):
pass
class NA_Aatrox_Top_Vi(Ratings):
pass
class NA_Aatrox_Top_Viktor(Ratings):
pass
class NA_Aatrox_Top_Vladimir(Ratings):
pass
class NA_Aatrox_Top_Volibear(Ratings):
pass
class NA_Aatrox_Top_Warwick(Ratings):
pass
class NA_Aatrox_Top_Xayah(Ratings):
pass
class NA_Aatrox_Top_Xerath(Ratings):
pass
class NA_Aatrox_Top_XinZhao(Ratings):
pass
class NA_Aatrox_Top_Yasuo(Ratings):
pass
class NA_Aatrox_Top_Yorick(Ratings):
pass
class NA_Aatrox_Top_Zac(Ratings):
pass
class NA_Aatrox_Top_Zed(Ratings):
pass
class NA_Aatrox_Top_Ziggs(Ratings):
pass
class NA_Aatrox_Top_Zilean(Ratings):
pass
class NA_Aatrox_Top_Zyra(Ratings):
pass
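The file above consists of one empty Ratings subclass per champion. If the class names matter only for attribute lookup, the same module could be generated programmatically; a sketch (assuming the getratings package is importable as in the file, champion list abridged):

from getratings.models.ratings import Ratings  # same import as the file above

CHAMPIONS = ["Aatrox", "Ahri", "Akali"]  # ...abridged; the real file covers every champion
for _name in CHAMPIONS:
    _cls_name = "NA_Aatrox_Top_" + _name
    # type(name, bases, dict) builds each empty subclass dynamically
    globals()[_cls_name] = type(_cls_name, (Ratings,), {})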
| 15.695444 | 46 | 0.766692 | 972 | 6,545 | 4.736626 | 0.151235 | 0.209818 | 0.389661 | 0.479583 | 0.803432 | 0.803432 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169748 | 6,545 | 416 | 47 | 15.733173 | 0.847258 | 0 | 0 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.498195 | 0.00361 | 0 | 0.501805 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
3f66936531dadafc5c2af036801fe5c879210a6a | 5,876 | py | Python | tests/optimizer/test_query.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | ["Apache-2.0"] | 1 | 2021-01-29T13:29:29.000Z | 2021-01-29T13:29:29.000Z | tests/optimizer/test_query.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | ["Apache-2.0"] | null | null | null | tests/optimizer/test_query.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | ["Apache-2.0"] | null | null | null |
import unittest
from shardingpy.api.algorithm.sharding.values import ListShardingValue, RangeShardingValue
from shardingpy.constant import ShardingOperator
from shardingpy.optimizer.query_optimizer import QueryOptimizeEngine
from shardingpy.parsing.parser.context.condition import Condition, Column, AndCondition, OrCondition
from shardingpy.parsing.parser.expressionparser import SQLNumberExpression
class QueryOptimizeEngineTest(unittest.TestCase):
def test_optimize_always_false_list_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.IN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.EQUAL, SQLNumberExpression(3))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertTrue(sharding_conditions.is_always_false())
def test_optimize_always_false_range_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(3),
SQLNumberExpression(4))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertTrue(sharding_conditions.is_always_false())
def test_optimize_always_false_list_conditions_and_range_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.IN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(3),
SQLNumberExpression(4))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertTrue(sharding_conditions.is_always_false())
def test_optimize_list_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.IN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.EQUAL, SQLNumberExpression(1))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertFalse(sharding_conditions.is_always_false())
self.assertEqual(len(sharding_conditions.sharding_conditions), 1)
self.assertEqual(len(sharding_conditions.sharding_conditions[0].sharding_values), 1)
sharding_value = sharding_conditions.sharding_conditions[0].sharding_values[0]
self.assertTrue(isinstance(sharding_value, ListShardingValue))
self.assertEqual(sharding_value.values, [1])
def test_optimize_range_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(1),
SQLNumberExpression(3))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertFalse(sharding_conditions.is_always_false())
self.assertEqual(len(sharding_conditions.sharding_conditions), 1)
self.assertEqual(len(sharding_conditions.sharding_conditions[0].sharding_values), 1)
sharding_value = sharding_conditions.sharding_conditions[0].sharding_values[0]
self.assertTrue(isinstance(sharding_value, RangeShardingValue))
self.assertEqual(sharding_value.value_range.lower, 1)
self.assertEqual(sharding_value.value_range.upper, 2)
def test_optimize_list_conditions_and_range_conditions(self):
condition1 = Condition(Column('column', 'tbl'), ShardingOperator.IN, SQLNumberExpression(1),
SQLNumberExpression(2))
condition2 = Condition(Column('column', 'tbl'), ShardingOperator.BETWEEN, SQLNumberExpression(1),
SQLNumberExpression(3))
and_condition = AndCondition()
and_condition.conditions.extend([condition1, condition2])
or_condition = OrCondition()
or_condition.and_conditions.append(and_condition)
sharding_conditions = QueryOptimizeEngine(or_condition, []).optimize()
self.assertFalse(sharding_conditions.is_always_false())
self.assertEqual(len(sharding_conditions.sharding_conditions), 1)
self.assertEqual(len(sharding_conditions.sharding_conditions[0].sharding_values), 1)
sharding_value = sharding_conditions.sharding_conditions[0].sharding_values[0]
self.assertTrue(isinstance(sharding_value, ListShardingValue))
self.assertEqual(sharding_value.values, [1, 2])
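Every test above rebuilds the same AndCondition/OrCondition scaffolding before optimizing. A small helper, sketched here using only names already imported in the file, would remove that repetition:

def optimize_conditions(*conditions):
    # wrap the given conditions in an AndCondition inside an OrCondition, then optimize
    and_condition = AndCondition()
    and_condition.conditions.extend(conditions)
    or_condition = OrCondition()
    or_condition.and_conditions.append(and_condition)
    return QueryOptimizeEngine(or_condition, []).optimize()

# usage inside a test:
#     sharding_conditions = optimize_conditions(condition1, condition2)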
| 59.959184 | 105 | 0.718005 | 547 | 5,876 | 7.469835 | 0.109689 | 0.132159 | 0.061674 | 0.070485 | 0.890602 | 0.886931 | 0.868331 | 0.861968 | 0.861968 | 0.861968 | 0 | 0.013813 | 0.186862 | 5,876 | 97 | 106 | 60.57732 | 0.841356 | 0 | 0 | 0.770115 | 0 | 0 | 0.018383 | 0 | 0 | 0 | 0 | 0 | 0.218391 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.149425 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
451195759f558e82649bcc19792e5a910b133975 | 12,508 | py | Python | tests/unit/models/field/oid.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 5 | 2020-08-26T20:12:00.000Z | 2020-12-11T16:39:22.000Z | tests/unit/models/field/oid.py | RaenonX/Jelly-Bot | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 234 | 2019-12-14T03:45:19.000Z | 2020-08-26T18:55:19.000Z | tests/unit/models/field/oid.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 2 | 2019-10-23T15:21:15.000Z | 2020-05-22T09:35:55.000Z |
from datetime import datetime
from typing import Type, Any, Tuple
from bson import ObjectId
from models import OID_KEY
from models.field import ObjectIDField, BaseField
from models.field.exceptions import (
FieldTypeMismatchError, FieldNoneNotAllowedError, FieldOidStringInvalidError, FieldOidDatetimeOutOfRangeError,
FieldError, FieldReadOnlyError
)
from ._test_val import TestFieldValue
from ._test_prop import TestFieldProperty
__all__ = ["TestOidFieldProperty", "TestOidFieldValueAllowNone", "TestOidFieldValueDefault",
"TestOidFieldValueNoAutocast", "TestOidFieldValueNoKey", "TestOidFieldValueNotReadonly",
"TestOidFieldValueAllowNoneNotReadOnly"]
class TestOidFieldProperty(TestFieldProperty.TestClass):
def get_field_class(self) -> Type[BaseField]:
return ObjectIDField
def valid_not_none_obj_value(self) -> Any:
return ObjectId.from_datetime(datetime(2020, 5, 9))
def expected_none_object(self) -> Any:
return ObjectId("000000000000000000000000")
def get_valid_default_values(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_invalid_default_values(self) -> Tuple[Any, ...]:
return "A", 7, True, datetime(1920, 1, 1), datetime(2107, 1, 1)
def get_expected_types(self) -> Tuple[Type[Any], ...]:
return ObjectId, str, datetime
def get_desired_type(self) -> Type[Any]:
return ObjectId
class TestOidFieldValueDefault(TestFieldValue.TestClass):
def get_field(self) -> BaseField:
return ObjectIDField("k")
def get_value_type_match_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", True),
(7, False),
(True, False),
(datetime(1920, 1, 1), True),
(datetime(2107, 1, 1), True),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def get_value_validity_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", False),
(7, False),
(True, False),
(datetime(1920, 1, 1), False),
(datetime(2107, 1, 1), False),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def is_auto_cast(self) -> bool:
return True
def get_values_to_cast(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_valid_value_to_set(self) -> Tuple[Tuple[Any, Any], ...]:
return ()
def get_invalid_value_to_set(self) -> Tuple[Tuple[Any, Type[FieldError]], ...]:
return (
(ObjectId("000000000000000000000000"), FieldReadOnlyError),
("5eb5f2800000000000000000", FieldReadOnlyError),
(datetime(2020, 5, 9), FieldReadOnlyError),
(None, FieldReadOnlyError),
("A", FieldReadOnlyError),
(7, FieldReadOnlyError),
(True, FieldReadOnlyError),
(datetime(1920, 1, 1), FieldReadOnlyError),
(datetime(2107, 1, 1), FieldReadOnlyError)
)
class TestOidFieldValueNotReadonly(TestFieldValue.TestClass):
def get_field(self) -> BaseField:
return ObjectIDField("k", readonly=False)
def get_value_type_match_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", True),
(7, False),
(True, False),
(datetime(1920, 1, 1), True),
(datetime(2107, 1, 1), True),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def get_value_validity_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", False),
(7, False),
(True, False),
(datetime(1920, 1, 1), False),
(datetime(2107, 1, 1), False),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def is_auto_cast(self) -> bool:
return True
def get_values_to_cast(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_valid_value_to_set(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_invalid_value_to_set(self) -> Tuple[Tuple[Any, Type[FieldError]], ...]:
return (
(None, FieldNoneNotAllowedError),
("A", FieldOidStringInvalidError),
(7, FieldTypeMismatchError),
(True, FieldTypeMismatchError),
(datetime(1920, 1, 1), FieldOidDatetimeOutOfRangeError),
(datetime(2107, 1, 1), FieldOidDatetimeOutOfRangeError)
)
class TestOidFieldValueAllowNone(TestFieldValue.TestClass):
def get_field(self) -> BaseField:
return ObjectIDField("k", allow_none=True)
def get_value_type_match_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, True),
("A", True),
(7, False),
(True, False),
(datetime(1920, 1, 1), True),
(datetime(2107, 1, 1), True),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def get_value_validity_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, True),
("A", False),
(7, False),
(True, False),
(datetime(1920, 1, 1), False),
(datetime(2107, 1, 1), False),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def is_auto_cast(self) -> bool:
return True
def get_values_to_cast(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(None, None),
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_valid_value_to_set(self) -> Tuple[Tuple[Any, Any], ...]:
return ()
def get_invalid_value_to_set(self) -> Tuple[Tuple[Any, Type[FieldError]], ...]:
return (
(None, FieldReadOnlyError),
(ObjectId("000000000000000000000000"), FieldReadOnlyError),
("5eb5f2800000000000000000", FieldReadOnlyError),
(datetime(2020, 5, 9), FieldReadOnlyError),
("A", FieldReadOnlyError),
(7, FieldReadOnlyError),
(True, FieldReadOnlyError),
(datetime(1920, 1, 1), FieldReadOnlyError),
(datetime(2107, 1, 1), FieldReadOnlyError)
)
class TestOidFieldValueAllowNoneNotReadOnly(TestFieldValue.TestClass):
def get_field(self) -> BaseField:
return ObjectIDField("k", allow_none=True, readonly=False)
def get_value_type_match_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, True),
("A", True),
(7, False),
(True, False),
(datetime(1920, 1, 1), True),
(datetime(2107, 1, 1), True),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def get_value_validity_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, True),
("A", False),
(7, False),
(True, False),
(datetime(1920, 1, 1), False),
(datetime(2107, 1, 1), False),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def is_auto_cast(self) -> bool:
return True
def get_values_to_cast(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(None, None),
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_valid_value_to_set(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(None, None),
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000")),
)
def get_invalid_value_to_set(self) -> Tuple[Tuple[Any, Type[FieldError]], ...]:
return (
("A", FieldOidStringInvalidError),
(7, FieldTypeMismatchError),
(True, FieldTypeMismatchError),
(datetime(1920, 1, 1), FieldOidDatetimeOutOfRangeError),
(datetime(2107, 1, 1), FieldOidDatetimeOutOfRangeError)
)
class TestOidFieldValueNoAutocast(TestFieldValue.TestClass):
def get_field(self) -> BaseField:
return ObjectIDField("k", auto_cast=False)
def get_value_type_match_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", True),
(7, False),
(True, False),
(datetime(1920, 1, 1), True),
(datetime(2107, 1, 1), True),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def get_value_validity_test(self) -> Tuple[Tuple[Any, bool], ...]:
return (
(None, False),
("A", False),
(7, False),
(True, False),
(datetime(1920, 1, 1), False),
(datetime(2107, 1, 1), False),
(ObjectId("000000000000000000000000"), True),
("5eb5f2800000000000000000", True),
(datetime(2020, 5, 9), True)
)
def is_auto_cast(self) -> bool:
return False
def get_values_to_cast(self) -> Tuple[Tuple[Any, Any], ...]:
return (
(ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
(datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000"))
)
def get_valid_value_to_set(self) -> Tuple[Tuple[Any, Any], ...]:
return ()
def get_invalid_value_to_set(self) -> Tuple[Tuple[Any, Type[FieldError]], ...]:
return (
(ObjectId("000000000000000000000000"), FieldReadOnlyError),
("5eb5f2800000000000000000", FieldReadOnlyError),
(datetime(2020, 5, 9), FieldReadOnlyError),
(None, FieldReadOnlyError),
("A", FieldReadOnlyError),
(7, FieldReadOnlyError),
(True, FieldReadOnlyError),
(datetime(1920, 1, 1), FieldReadOnlyError),
(datetime(2107, 1, 1), FieldReadOnlyError)
)
class TestOidFieldValueNoKey(TestOidFieldValueDefault):
def get_field(self) -> BaseField:
return ObjectIDField()
def test_key_name(self):
self.assertEqual(OID_KEY, self.get_field().key)
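The same (input, expected) cast triples recur in every TestOidFieldValue* subclass above. A module-level constant would factor them out; a sketch reusing only the file's own imports:

OID_CAST_CASES = (
    (ObjectId("000000000000000000000000"), ObjectId("000000000000000000000000")),
    ("5eb5f2800000000000000000", ObjectId("5eb5f2800000000000000000")),
    (datetime(2020, 5, 9), ObjectId("5eb5f2800000000000000000")),
)

# each subclass could then simply:
#     def get_values_to_cast(self):
#         return OID_CAST_CASES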
| 36.466472 | 114 | 0.583626 | 1,083 | 12,508 | 6.610342 | 0.072946 | 0.030172 | 0.050845 | 0.06174 | 0.820087 | 0.820087 | 0.820087 | 0.81408 | 0.81408 | 0.81408 | 0 | 0.203704 | 0.279021 | 12,508 | 342 | 115 | 36.573099 | 0.590153 | 0 | 0 | 0.763066 | 0 | 0 | 0.144947 | 0.141669 | 0 | 0 | 0 | 0 | 0.003484 | 1 | 0.15331 | false | 0 | 0.027875 | 0.149826 | 0.355401 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 9 |
18cf4fef4f0a06ed0813c945dd5bbe21c102eec5 | 49,176 | py | Python | simetuc/test/test_settings/test_settings.py | mrigankadeep/simetuc | ed85e53c69f0b669534d2d61c767fc1bf074859c | ["MIT"] | 3 | 2018-04-13T10:06:53.000Z | 2021-11-17T11:29:52.000Z | simetuc/test/test_settings/test_settings.py | mrigankadeep/simetuc | ed85e53c69f0b669534d2d61c767fc1bf074859c | ["MIT"] | 3 | 2018-08-10T09:03:46.000Z | 2020-07-18T06:56:31.000Z | simetuc/test/test_settings/test_settings.py | mrigankadeep/simetuc | ed85e53c69f0b669534d2d61c767fc1bf074859c | ["MIT"] | 1 | 2021-04-07T18:39:24.000Z | 2021-04-07T18:39:24.000Z |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 22 00:10:24 2016
@author: Villanueva
"""
import pytest
import os
import numpy as np
import ruamel.yaml as yaml
import copy
from settings_parser import Settings, SettingsFileError, SettingsValueError, SettingsExtraValueWarning
import simetuc.settings as settings
from simetuc.util import temp_config_filename, LabelError
from simetuc.util import DecayTransition, IonType, EneryTransferProcess, Transition
test_folder_path = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
def setup_cte_full_S(setup_cte):
copy_cte = copy.deepcopy(setup_cte)
copy_cte.states['sensitizer_states_labels'] = ['GS', '1ES', '2ES', '3ES']
copy_cte.states['sensitizer_states'] = 4
copy_cte.decay['branching_S'] = [DecayTransition(IonType.S, 1, 0, 1.0),
DecayTransition(IonType.S, 2, 1, 0.5),
DecayTransition(IonType.S, 3, 1, 0.01)]
copy_cte.optimization['processes'] = ['CR50', DecayTransition(IonType.S, 2, 1, 0.5)]
return copy_cte
data_all_mandatory_ok = data_branch_ok = '''version: 1
lattice:
# all fields here are mandatory
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
# all fields here are mandatory,
# leave empty if necessary (i.e.: just "sensitizer_ion_label" on a line), but don't delete them
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
# the excitation label can be any text
# at this point, only one active excitation is supported
# the t_pulse value is only mandatory for the dynamics, it's ignored in the steady state
Vis_473:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
NIR_980:
active: False
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Yb(GS)->Yb(ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
sensitizer_decay:
# lifetimes in s
ES: 2.5e-3
activator_decay:
# lifetimes in s
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
activator_branching_ratios:
# 3H5 and 3H4 to 3F4
3H5->3F4: 0.4
3H4->3F4: 0.3
# 3F3 to 3H4
3F3->3H4: 0.999
# 1G4 to 3F4, 3H5, 3H4 and 3F3
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
# 1D2 to 3F4
1D2->3F4: 0.43
'''
def test_standard_config(setup_cte_settings):
'''Test that the returned Settings instance for a known config file is correct.'''
filename = os.path.join(test_folder_path, 'test_standard_config.txt')
cte = settings.load(filename)
with open(filename, 'rt') as file:
config_file = file.read()
setup_cte_settings['config_file'] = config_file
assert cte.lattice == setup_cte_settings['lattice']
assert cte.states == setup_cte_settings['states']
assert cte.excitations == setup_cte_settings['excitations']
assert cte.decay == setup_cte_settings['decay']
assert cte.energy_transfer == setup_cte_settings['energy_transfer']
assert cte.optimization == setup_cte_settings['optimization']
assert cte.power_dependence == setup_cte_settings['power_dependence']
assert cte.concentration_dependence == setup_cte_settings['concentration_dependence']
assert cte == setup_cte_settings
def test_non_existing_file():
with pytest.raises(SettingsFileError) as excinfo:
# load non existing file
settings.load(os.path.join(test_folder_path, 'test_non_existing_config.txt'))
assert excinfo.match(r"Error reading file")
assert excinfo.type == SettingsFileError
def test_empty_file():
with pytest.raises(SettingsFileError) as excinfo:
with temp_config_filename('') as filename:
settings.load(filename)
assert excinfo.match(r"The settings file is empty or otherwise invalid")
assert excinfo.type == SettingsFileError
@pytest.mark.parametrize('bad_yaml_data', [':', '\t', 'key: value:',
'label1:\n key1:value1'+'label2:\n key2:value2'],
ids=['colon', 'tab', 'bad colon', 'bad value'])
def test_yaml_error_config(bad_yaml_data):
with pytest.raises(SettingsFileError) as excinfo:
with temp_config_filename(bad_yaml_data) as filename:
settings.load(filename)
assert excinfo.match(r"Error while parsing the config file")
assert excinfo.type == SettingsFileError
def test_not_dict_config():
with pytest.raises(SettingsFileError) as excinfo:
with temp_config_filename('vers') as filename:
settings.load(filename)
assert excinfo.match(r"The settings file is empty or otherwise invalid")
assert excinfo.type == SettingsFileError
def test_version_config():
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data_all_mandatory_ok.replace('version: 1',
'version: 2')) as filename:
settings.load(filename)
assert excinfo.match(r"cannot be larger than 1")
assert excinfo.type == SettingsValueError
def idfn(sections_data):
'''Returns the name of the test according to the parameters'''
num_l = len(sections_data.splitlines())
return 'sections_{}'.format(num_l)
import itertools
import operator
# list of mandatory sections
data = '''version: 1
lattice: asd
states: asd
excitations: asd
sensitizer_decay: asd
activator_decay: asd'''
# combinations of sections. At least 1 is missing
list_data = list(itertools.accumulate(data.splitlines(keepends=True)[:-1], operator.concat))
@pytest.mark.parametrize('sections_data', list_data, ids=idfn)
def test_sections_config(sections_data):
with pytest.raises(SettingsFileError) as excinfo:
with temp_config_filename(sections_data) as filename:
settings.load(filename)
assert excinfo.match(r"Those sections must be present")
assert excinfo.match(r"Sections that are needed but not present in the file")
assert excinfo.type == SettingsFileError
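For intuition, itertools.accumulate with operator.concat turns the mandatory-section lines into cumulative prefixes, each a config file missing at least the final sections; a standalone illustration:

import itertools
import operator

lines = ["version: 1\n", "lattice: asd\n", "states: asd\n"]
print(list(itertools.accumulate(lines, operator.concat)))
# ['version: 1\n', 'version: 1\nlattice: asd\n', 'version: 1\nlattice: asd\nstates: asd\n']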
# should get a warning for an extra unrecognized section
def test_extra_sections_warning_config():
data = data_all_mandatory_ok+'''extra_unknown_section: dsa'''
with pytest.warns(SettingsExtraValueWarning) as warnings:
with temp_config_filename(data) as filename:
settings.load(filename)
assert len(warnings) == 2 # warnings for the extra section and its value
warning = warnings.pop(SettingsExtraValueWarning)
assert issubclass(warning.category, SettingsExtraValueWarning)
assert 'Some values or sections should not be present in the file' in str(warning.message)
data_lattice = '''version: 1
lattice:
name: bNaYF4
N_uc: {}
# concentration
S_conc: {}
A_conc: {}
# unit cell
# distances in Angstrom
a: {}
b: {}
c: {}
# angles in degree
alpha: {}
beta: {}
gamma: {}
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: {}
sites_occ: {}
states: asd
excitations: asd
sensitizer_decay: asd
activator_decay: asd
'''
# list of tuples of values for N_uc, S_conc, A_conc, a,b,c, alpha,
lattice_values = [('dsa', 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[[0, 0, 0], [2/3, 1/3, 1/2]]', '[1, 1/2]'), # text instead of number
(0.3, 0.3, 0.3, 'dsa', 5.9, 3.5, 90, 90, 120, '[[0, 0, 0], [2/3, 1/3, 1/2]]', '[1, 1/2]'), # text instead of number
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[]', '[]'), # empty occupancies
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '', ''), # empty occupancies 2
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '0, 0, 0', '[1.1, 1/2]'), # occupancy pos not a list
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[[0, 0]]', '[1]'), # sites_pos must be list of 3 numbers
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[[0, 0, 0], [one/5, 1/3, 1/2]]', '[1, 1/2]'), # sites_pos string
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[[0, 0, 0], [1/3, 1/3, 1/2]]', '[1/2]'), # different number of occ.
(0.3, 0.3, 0.3, 5.9, 5.9, 3.5, 90, 90, 120, '[[0, 0, 0]]', '[1/2, 0.75]')] # different number of occ. 2
ids=['text instead of number', 'text instead of number',\
'empty occupancies', 'empty occupancies 2', 'occupancies pos not a list', 'sites_pos: list of 3 numbers',\
'sites_pos string', 'different number of occ.', 'different number of occ. 2']
@pytest.mark.parametrize('lattice_values', lattice_values, ids=ids)
def test_lattice_config(lattice_values):
data_format = data_lattice.format(*lattice_values)
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data_format) as filename:
settings.load(filename)
assert excinfo.type == SettingsValueError
data_lattice_occ_ok = '''lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90.0
beta: 90.0
gamma: 120.0
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: {}
sites_occ: {}
'''
# list of tuples of values for sites_pos and sites_occ
lattice_values = [('[0, 0, 0]', '1'), # one pos and occ
('[[0, 0, 0]]', '[1]')] # one pos and occ list of lists
ids=['one pos and occ', 'one pos and occ list of lists']
@pytest.mark.parametrize('lattice_values', lattice_values, ids=ids)
def test_lattice_config_ok_occs(lattice_values):
data_format = data_lattice_occ_ok.format(*lattice_values)
confs = Settings({'lattice': settings.configs.settings['lattice']})
with temp_config_filename(data_format) as filename:
confs.validate(filename)
for elem in ['name', 'spacegroup', 'N_uc', 'S_conc',
'A_conc', 'sites_pos', 'sites_occ']:
assert elem in confs.lattice
data_lattice_full = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
'''
def test_lattice_dmax():
data = data_lattice_full + ''' d_max: 100.0
d_max_coop: 25.0'''
data = data.replace('version: 1', '')
confs = Settings({'lattice': settings.configs.settings['lattice']})
with temp_config_filename(data) as filename:
confs.validate(filename)
assert confs.lattice['d_max'] == 100.0
def test_lattice_radius():
data = data_lattice_full + '''states:
asd: dsa
excitations:
asd: dsa
sensitizer_decay:
asd: dsa
activator_decay:
asd: dsa'''
data = data.replace('N_uc: 8', 'radius: 100.0')
with pytest.raises(SettingsValueError) as excinfo: # ok, error later
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Error validating section "states"')
assert excinfo.match(r'"sensitizer_ion_label" not in dictionary')
assert excinfo.type == SettingsValueError
data_lattice_ok = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
excitations: asd
sensitizer_decay: asd
activator_decay: asd
'''
# error b/c section states is not a dictionary
def test_empty_states_config():
data = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states: asd
excitations: asd
sensitizer_decay: asd
activator_decay: asd
'''
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Error validating section "states"')
assert excinfo.type == SettingsValueError
def test_states_no_states_labels():
data = data_lattice_ok + '''states:
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]'''
with pytest.raises(SettingsValueError) as excinfo: # missing key
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Error validating section "states"')
assert excinfo.match(r'"sensitizer_ion_label" not in dictionary')
assert excinfo.type == SettingsValueError
def test_states_no_list():
data = data_lattice_ok + '''states:
sensitizer_ion_label: Yb
sensitizer_states_labels:
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]'''
with pytest.raises(SettingsValueError) as excinfo: # empty S labels
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"sensitizer_states_labels")
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_states_empty_list(): # empty list for S states
data = data_lattice_ok + '''states:
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: []'''
with pytest.raises(SettingsValueError) as excinfo: # empty S labels list
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Length of activator_states_labels \(0\) cannot be smaller than 1.')
assert excinfo.type == SettingsValueError
def test_states_fractions(): # fractions in the state labels
data = data_lattice_ok + '''states:
sensitizer_ion_label: Yb
sensitizer_states_labels: [2F7/2, 2F5/2]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]'''
with pytest.raises(SettingsValueError) as excinfo: # it should fail in the excitations section
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Setting "excitations"')
assert excinfo.match(r'does not have the right type')
assert excinfo.type == SettingsValueError
data_states_ok = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
sensitizer_decay:
ES: 2.5e-3
activator_decay:
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
def test_excitations_config1():
data = data_states_ok + '''excitations:'''
with pytest.raises(SettingsValueError) as excinfo: # no excitations
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"excitations")
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_excitations_config2():
data = data_states_ok + '''excitations:
Vis_473:'''
with pytest.raises(SettingsValueError) as excinfo: # empty excitation
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_excitations_config3():
data = data_states_ok + '''excitations:
Vis_473:
active: False
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # no active excitation
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"At least one excitation must be present and active")
assert excinfo.type == SettingsValueError
def test_excitations_config4():
data = data_states_ok + '''excitations:
Vis_473:
active: True
power_dens: dsa # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # power_dens is a string
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_excitations_config5():
data = data_states_ok + '''excitations:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # label missing
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_excitations_config7():
data = data_states_ok + '''excitations:
Vis_473:
active: True
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # missing power_dens
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match('power_dens')
assert excinfo.type == SettingsValueError
def test_excitations_parse_excitations():
data_exc ='''Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
exc_dict = yaml.safe_load(data_exc)
states_dict = {'activator_states_labels': ['3H6', '3F4', '3H5', '3H4', '3F3', '1G4', '1D2'],
'sensitizer_states_labels': ['GS', 'ES'],
'activator_ion_label': 'Tm',
'sensitizer_ion_label': 'Yb'}
exc = settings._parse_excitations(states_dict, exc_dict)
excitation = exc['Vis_473'][0]
assert excitation.transition.ion == IonType.A
assert excitation.transition.state_i == 0
assert excitation.transition.state_f == 5
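The expected indices in the assertions above follow directly from each label's position in activator_states_labels; a quick standalone check:

labels = ['3H6', '3F4', '3H5', '3H4', '3F3', '1G4', '1D2']
print(labels.index('3H6'), labels.index('1G4'))  # 0 5 -> state_i and state_f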
def test_abs_config_wrong_ions_labels():
data = data_states_ok + '''excitations:
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: T(3H6) -> T(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
with pytest.raises(LabelError) as excinfo: # both labels are wrong
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'Incorrect ion label in excitation: T\(3H6\) -> T\(1G4\)')
assert excinfo.type == LabelError
def test_abs_config_wrong_ion_label():
data = data_states_ok + '''excitations:
NIR_980:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Y(GS)->Yb(ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
'''
with pytest.raises(LabelError) as excinfo: # ion labels is wrong
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match('Incorrect ion label in excitation')
assert excinfo.type == LabelError
def test_abs_config_ok(): # good test
data = data_states_ok + '''excitations:
NIR_980:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Yb(GS)->Yb(ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
'''
with temp_config_filename(data) as filename:
settings.load(filename)
def test_abs_config_ok_ESA(): # ok, ESA settings
data = data_states_ok + '''excitations:
NIR_800:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: [Tm(3H6)->Tm(3H4), Tm(3H5)->Tm(1G4)]
degeneracy: [13/9, 11/9]
pump_rate: [4.4e-3, 2e-3] # cm2/J
'''
with temp_config_filename(data) as filename:
settings.load(filename)
def test_abs_config6():
data = data_states_ok + '''excitations:
NIR_800:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: [Tm(3H6)->Tm(3H4), Tm(3H5)->Tm(1G4)]
degeneracy: [13/9]
pump_rate: [4.4e-3, 2e-3] # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # degeneracy list too short
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match("pump_rate, degeneracy, and process must have the same number of items")
assert excinfo.type == SettingsValueError
def test_abs_config7():
data = data_states_ok + '''excitations:
NIR_800:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: [Tm(3H6)->Tm(3H4), Tm(3H5)->Tm(1G4)]
degeneracy: [13/9, 11/9]
pump_rate: [4.4e-3, -2e-3] # cm2/J
'''
with pytest.raises(SettingsValueError) as excinfo: # pump rate must be positive
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match("cannot be smaller than 0.")
assert excinfo.type == SettingsValueError
def test_abs_config8(): # ok, ESA on different ions
data = data_states_ok + '''excitations:
NIR_800:
active: True
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: [Tm(3H6)->Tm(3H4), Yb(GS)->Yb(ES)]
degeneracy: [13/9, 11/9]
pump_rate: [4.4e-3, 2e-3] # cm2/J
'''
with temp_config_filename(data) as filename:
settings.load(filename)
data_abs_ok = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
'''
def test_decay_ok():
data = data_abs_ok + '''sensitizer_decay:
# lifetimes in s
ES: dsa
activator_decay:
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
with pytest.raises(SettingsValueError) as excinfo: # decay rate is string
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_not_all_states_decay():
data = data_abs_ok + '''sensitizer_decay:
# lifetimes in s
ES: 1e-3
activator_decay:
'''
with pytest.raises(SettingsValueError) as excinfo: # all states must have a decay rate
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"activator_decay")
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
data = data_abs_ok + '''sensitizer_decay:
activator_decay:
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
with pytest.raises(SettingsValueError) as excinfo: # all states must have a decay rate
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"sensitizer_decay")
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_decay_missing_A_state():
data = data_abs_ok + '''sensitizer_decay:
# lifetimes in s
ES: 1e-3
activator_decay:
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
'''
with pytest.raises(SettingsValueError) as excinfo: # 1D2 state missing
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"All activator states must have a decay rate")
assert excinfo.type == SettingsValueError
def test_decay_missing_S_state():
data = data_abs_ok + '''sensitizer_decay:
1ES: 2e-3
activator_decay:
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
data = data.replace('sensitizer_states_labels: [GS, ES]',
'sensitizer_states_labels: [GS, 1ES, 2ES]')
with pytest.raises(SettingsValueError) as excinfo: # 2ES state missing
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"All sensitizer states must have a decay rate")
assert excinfo.type == SettingsValueError
def test_decay_config5():
data = data_abs_ok + '''sensitizer_decay:
# lifetimes in s
ES: 1e-3
activator_decay:
# lifetimes in s
34: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong state label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"is not a valid state label")
assert excinfo.type == settings.LabelError
data_decay_ok = '''version: 1 # mandatory, only 1 is supported at the moment
lattice:
# all fields here are mandatory
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
# all fields here are mandatory,
# leave empty if necessary (i.e.: just "sensitizer_ion_label" on a line), but don't delete them
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
# the excitation label can be any text
# at this point, only one active excitation is supported
# the t_pulse value is only mandatory for the dynamics, it's ignored in the steady state
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
NIR_980:
active: False
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Yb(GS)->Yb(ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
sensitizer_decay:
# lifetimes in s
ES: 2.5e-3
activator_decay:
# lifetimes in s
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
'''
def test_branch_config_ok(): # all ok
data = data_decay_ok + '''sensitizer_branching_ratios:
ES->GS: 1.0
activator_branching_ratios:
3H5->3F4: 0.4
3H4->3F4: 0.3
3F3->3H4: 0.999
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
1D2->3F4: 0.43
'''
with temp_config_filename(data) as filename:
settings.load(filename)
def test_branch_config_wrong_label():
data = data_decay_ok + '''
activator_branching_ratios:
3H->3F4: 0.4
3H4->3F4: 0.3
3F3->3H4: 0.999
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
1D2->3F4: 0.43
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong state label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"is not a valid state label")
assert excinfo.type == settings.LabelError
def test_branch_config_value_above_1():
data = data_decay_ok + '''
activator_branching_ratios:
3H5->3F4: 1.4
3H4->3F4: 0.3
3F3->3H4: 0.999
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
1D2->3F4: 0.43
'''
with pytest.raises(SettingsValueError) as excinfo: # value above 1.0
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r'cannot be larger than 1')
assert excinfo.type == SettingsValueError
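# NOTE: the energy-transfer tests below build on `data_branch_ok`, whose
# definition is missing from this excerpt. A plausible reconstruction (an
# assumption, based on the passing branching fixture in test_branch_config_ok
# above):
data_branch_ok = data_decay_ok + '''sensitizer_branching_ratios:
    ES->GS: 1.0
activator_branching_ratios:
    3H5->3F4: 0.4
    3H4->3F4: 0.3
    3F3->3H4: 0.999
    1G4->3F4: 0.15
    1G4->3H5: 0.16
    1G4->3H4: 0.04
    1G4->3F3: 0.00
    1D2->3F4: 0.43
'''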
def test_ET_config_wrong_ion_label():
data = data_branch_ok + '''energy_transfer:
CR50:
process: T(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong ion label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"is not a valid ion label")
assert excinfo.type == settings.LabelError
def test_ET_config_wrong_state_label():
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(34) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong state label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"is not a valid state label")
assert excinfo.type == settings.LabelError
def test_ET_config_wrong_multipolarity():
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: fds
strength: 8.87920884e+08
'''
with pytest.raises(SettingsValueError) as excinfo: # wrong multipolarity
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_ET_config_wrong_initial_final_ion_label():
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Yb(3H4) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
'''
with pytest.raises(settings.LabelError) as excinfo: # initial ion label should be the same
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"is not a valid state label")
assert excinfo.type == settings.LabelError
def test_ET_config_duplicate_ET_labels():
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Yb(3H4) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
CR50:
process: Yb(ES) + Tm(3H6) -> Yb(GS) + Tm(3H5)
multipolarity: 8
strength: 1e3
'''
with pytest.raises(SettingsValueError) as excinfo: # duplicate labels
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Duplicate label")
assert excinfo.type == SettingsValueError
def test_ET_config_missing_strength():
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
'''
with pytest.raises(SettingsValueError) as excinfo: # strength missing
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match('"strength" not in dictionary')
assert excinfo.type == SettingsValueError
def test_ET_config_missing_ETlabel():
data = data_branch_ok + '''energy_transfer:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
'''
with pytest.raises(SettingsValueError) as excinfo: # label missing
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_ET_ok(): # ok
data = data_branch_ok + '''energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
strength: 1e3
strength_avg: 1e1
'''
with temp_config_filename(data) as filename:
settings.load(filename)
def test_ET_coop_ok(): # ok
data = data_branch_ok + '''energy_transfer:
CR50:
process: Yb(ES) + Yb(ES) + Tm(3H6) -> Yb(GS) + Yb(GS) + Tm(1G4)
multipolarity: 6
strength: 1e3
'''
with temp_config_filename(data) as filename:
settings.load(filename)
data_ET_ok = '''version: 1 # mandatory, only 1 is supported at the moment
lattice:
# all fields here are mandatory
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
# all fields here are mandatory,
# leave empty if necessary (i.e.: just "sensitizer_ion_label" on a line), but don't delete them
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
# the excitation label can be any text
# at this point, only one active excitation is supported
# the t_pulse value is only mandatory for the dynamics, it's ignored in the steady state
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
NIR_980:
active: False
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Yb(GS)->Yb(ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
sensitizer_decay:
# lifetimes in s
ES: 2.5e-3
activator_decay:
# lifetimes in s
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
sensitizer_branching_ratios:
# nothing. This section is still mandatory, though
activator_branching_ratios:
# 3H5 and 3H4 to 3F4
3H5->3F4: 0.4
3H4->3F4: 0.3
# 3F3 to 3H4
3F3->3H4: 0.999
# 1G4 to 3F4, 3H5, 3H4 and 3F3
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
# 1D2 to 3F4
1D2->3F4: 0.43
energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
ETU53:
process: Tm(1G4) + Tm(3H4) -> Tm(1D2) + Tm(3F4)
multipolarity: 6
strength: 4.50220614e+08
ETU55:
process: Tm(1G4) + Tm(1G4) -> Tm(1D2) + Tm(3F3)
multipolarity: 6
strength: 0 # 4.50220614e+7
ETU1:
process: Yb(ES) + Tm(3H6) -> Yb(GS) + Tm(3H5)
multipolarity: 6
strength: 1e4
BackET:
process: Tm(3H4) + Yb(GS) -> Tm(3H6) + Yb(ES)
multipolarity: 6
strength: 4.50220614e+3
EM:
process: Yb(ES) + Yb(GS) -> Yb(GS) + Yb(ES)
multipolarity: 6
strength: 4.50220614e+10
'''
data_ET_ok_full_S = '''version: 1 # mandatory, only 1 is supported at the moment
lattice:
# all fields here are mandatory
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
states:
# all fields here are mandatory,
# leave empty if necessary (i.e.: just "sensitizer_ion_label" on a line), but don't delete them
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, 1ES, 2ES, 3ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
# the excitation label can be any text
# at this point, only one active excitation is supported
# the t_pulse value is only mandatory for the dynamics, it's ignored in the steady state
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
NIR_980:
active: False
power_dens: 1e7 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Yb(GS)->Yb(1ES)
degeneracy: 4/3
pump_rate: 4.4e-3 # cm2/J
sensitizer_decay:
# lifetimes in s
1ES: 2.5e-3
2ES: 2.5e-4
3ES: 2.5e-5
activator_decay:
# lifetimes in s
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
sensitizer_branching_ratios:
2ES -> 1ES: 0.5
3ES -> 1ES: 0.01
activator_branching_ratios:
# 3H5 and 3H4 to 3F4
3H5->3F4: 0.4
3H4->3F4: 0.3
# 3F3 to 3H4
3F3->3H4: 0.999
# 1G4 to 3F4, 3H5, 3H4 and 3F3
1G4->3F4: 0.15
1G4->3H5: 0.16
1G4->3H4: 0.04
1G4->3F3: 0.00
# 1D2 to 3F4
1D2->3F4: 0.43
energy_transfer:
CR50:
process: Tm(1G4) + Tm(3H6) -> Tm(3H4) + Tm(3H5)
multipolarity: 6
strength: 8.87920884e+08
ETU53:
process: Tm(1G4) + Tm(3H4) -> Tm(1D2) + Tm(3F4)
multipolarity: 6
strength: 4.50220614e+08
ETU55:
process: Tm(1G4) + Tm(1G4) -> Tm(1D2) + Tm(3F3)
multipolarity: 6
strength: 0 # 4.50220614e+7
ETU1:
process: Yb(1ES) + Tm(3H6) -> Yb(GS) + Tm(3H5)
multipolarity: 6
strength: 1e4
BackET:
process: Tm(3H4) + Yb(GS) -> Tm(3H6) + Yb(1ES)
multipolarity: 6
strength: 4.50220614e+3
EM:
process: Yb(1ES) + Yb(GS) -> Yb(GS) + Yb(1ES)
multipolarity: 6
strength: 4.50220614e+10
'''
# test optimization processes
def test_optim_wrong_proc():
'''Wrong ET optimization process'''
data = data_ET_ok + '''optimization:
processes: [ETU_does_not_exist]
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong ET process label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Wrong labels in optimization: processes")
assert excinfo.type == settings.LabelError
def test_optim_wrong_proc_2():
'''Wrong ET optimization process'''
data = data_ET_ok + '''optimization:
processes: [ETU53, ETU_does_not_exist]
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong ET process label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Wrong labels in optimization: processes")
assert excinfo.type == settings.LabelError
def test_optim_wrong_B_proc():
'''Wrong branching ratio optimization process'''
data = data_ET_ok + '''optimization:
processes: [3F3->3H5]
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong ET process label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Wrong labels in optimization: processes")
assert excinfo.type == settings.LabelError
def test_optim_wrong_B_proc_label():
'''Wrong branching ratio optimization process'''
data = data_ET_ok + '''optimization:
processes: [3H145->3F4]
'''
with pytest.raises(settings.LabelError) as excinfo: # wrong ET process label
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Wrong labels in optimization: processes")
assert excinfo.type == settings.LabelError
@pytest.mark.parametrize('data_proc', [(data_ET_ok, '3H5->3F4', Transition(IonType.A, 2, 1)),
(data_ET_ok_full_S, '3ES->1ES', Transition(IonType.S, 3, 1))])
def test_optim_ok_proc(data_proc): # ok
data = data_proc[0] + '''optimization:
processes: [ETU53, {}]
'''.format(data_proc[1])
with temp_config_filename(data) as filename:
cte = settings.load(filename)
ETU53 = EneryTransferProcess([Transition(IonType.A, 5, 6), Transition(IonType.A, 3, 1)],
mult=6, strength=2.5e8, name='ETU53')
assert cte.optimization['processes'] == [ETU53, data_proc[2]]
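# Index note: Transition(IonType.A, 5, 6) is Tm(1G4)->Tm(1D2) and
# Transition(IonType.A, 3, 1) is Tm(3H4)->Tm(3F4), using 0-based positions in
# activator_states_labels [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]; likewise
# '3H5->3F4' maps to Transition(IonType.A, 2, 1) in the parametrize above.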
def test_optim_method(): # ok
data = data_ET_ok + '''optimization:
method: COBYLA'''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert cte.optimization['method'] == 'COBYLA'
def test_optim_excitations(): # ok
data = data_ET_ok + '''optimization:
excitations: [Vis_473, NIR_980]'''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert cte.optimization['excitations'] == ['Vis_473', 'NIR_980']
def test_optim_wrong_excitations(): # ok
data = data_ET_ok + '''optimization:
excitations: [Vis_473, wrong_label]'''
with pytest.raises(settings.LabelError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"in optimization: excitations")
assert excinfo.match(r"not found in excitations section above!")
assert excinfo.type == settings.LabelError
# test simulation params
data_sim_params = '''simulation_params:
rtol: 1e-3
atol: 1e-15
N_steps_pulse: 100
N_steps: 1000
'''
def test_sim_params_config1(): # ok
data = data_ET_ok + data_sim_params
with temp_config_filename(data) as filename:
print(filename)
cte = settings.load(filename)
assert cte.simulation_params == dict([('rtol', 1e-3),
('atol', 1e-15),
('N_steps_pulse', 100),
('N_steps', 1000)])
def test_pow_dep_config1(): # ok
data = data_ET_ok + '''power_dependence: [1e0, 1e7, 8]'''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert np.alltrue(cte.power_dependence == np.array([1.00000000e+00, 1.00000000e+01, 1.00000000e+02,
1.00000000e+03, 1.00000000e+04, 1.00000000e+05,
1.00000000e+06, 1.00000000e+07]))
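# Sketch (assumption): the assertion above suggests the loader expands
# `power_dependence: [min, max, N]` into N logarithmically spaced values.
def _expected_power_densities(min_power, max_power, num):
    """Illustrative helper, not part of the original test module."""
    return np.logspace(np.log10(min_power), np.log10(max_power), num)
# _expected_power_densities(1e0, 1e7, 8) reproduces the array asserted above.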
def test_pow_dep_config2(): # not present
data = data_ET_ok + ''''''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert 'power_dependence' not in cte
def test_pow_dep_config3(): # empty
data = data_ET_ok + '''power_dependence: []'''
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Length of power_dependence")
assert excinfo.match(r"cannot be smaller than 3")
assert excinfo.type == SettingsValueError
def test_pow_dep_config4(): # text instead numbers
data = data_ET_ok + '''power_dependence: [asd, 1e7, 8]'''
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"does not have the right type")
assert excinfo.type == SettingsValueError
def test_conc_dep_config1(): # ok
data = data_ET_ok + '''concentration_dependence:
concentrations: [[0, 1, 2], [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]]'''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert cte.concentration_dependence['concentrations'] == [
(0.0, 0.01), (1.0, 0.01), (2.0, 0.01), (0.0, 0.1),
(1.0, 0.1), (2.0, 0.1), (0.0, 0.2), (1.0, 0.2),
(2.0, 0.2), (0.0, 0.3), (1.0, 0.3), (2.0, 0.3),
(0.0, 0.4), (1.0, 0.4), (2.0, 0.4), (0.0, 0.5),
(1.0, 0.5), (2.0, 0.5)]
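# The expected list above is the cartesian product of the two concentration
# lists, with the sensitizer concentration varying fastest. A sketch of the
# presumed expansion order (an assumption about the loader):
#     S_concs, A_concs = [0, 1, 2], [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
#     pairs = [(float(s), float(a)) for a in A_concs for s in S_concs]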
def test_conc_dep_config2(): # not present
data = data_ET_ok + ''''''
with temp_config_filename(data) as filename:
cte = settings.load(filename)
assert 'concentration_dependence' not in cte
def test_conc_dep_config3(): # ok, but empty
data = data_ET_ok + '''concentration_dependence:
concentrations: []'''
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"Length of concentrations")
assert excinfo.match(r"cannot be smaller than 2")
assert excinfo.type == SettingsValueError
def test_conc_dep_config4(): # negative number
data = data_ET_ok + '''concentration_dependence:
concentrations: [[0, 1, 2], [-0.01, 0.1, 0.2, 0.3, 0.4, 0.5]]'''
with pytest.raises(SettingsValueError) as excinfo:
with temp_config_filename(data) as filename:
settings.load(filename)
assert excinfo.match(r"cannot be smaller than 0")
assert excinfo.type == SettingsValueError
# test extra value in section lattice
def test_extra_value():
extra_data = '''version: 1
lattice:
name: bNaYF4
N_uc: 8
# concentration
S_conc: 0.3
A_conc: 0.3
# unit cell
# distances in Angstrom
a: 5.9738
b: 5.9738
c: 3.5297
# angles in degree
alpha: 90
beta: 90
gamma: 120
# the number is also ok for the spacegroup
spacegroup: P-6
# info about sites
sites_pos: [[0, 0, 0], [2/3, 1/3, 1/2]]
sites_occ: [1, 1/2]
extra_value: 3
states:
sensitizer_ion_label: Yb
sensitizer_states_labels: [GS, ES]
activator_ion_label: Tm
activator_states_labels: [3H6, 3F4, 3H5, 3H4, 3F3, 1G4, 1D2]
excitations:
Vis_473:
active: True
power_dens: 1e6 # power density W/cm^2
t_pulse: 1e-8 # pulse width, seconds
process: Tm(3H6) -> Tm(1G4) # both ion labels are required
degeneracy: 13/9
pump_rate: 9.3e-4 # cm2/J
sensitizer_decay:
# lifetimes in s
ES: 2.5e-3
activator_decay:
# lifetimes in s
3F4: 12e-3
3H5: 25e-6
3H4: 2e-3
3F3: 2e-6
1G4: 760e-6
1D2: 67.5e-6
sensitizer_branching_ratios:
activator_branching_ratios:
'''
with pytest.warns(SettingsExtraValueWarning) as warnings: # "extra_value" in lattice section
with temp_config_filename(extra_data) as filename:
settings.load(filename)
assert len(warnings) == 1 # one warning
warning = warnings.pop(SettingsExtraValueWarning)
assert issubclass(warning.category, SettingsExtraValueWarning)
assert 'Some values or sections should not be present in the file' in str(warning.message)
| 18f212b8830be863c335a9df99d4155c38e152b1 | 190 | py | Python
| my_project/my_app/tests.py | sotch-pr35mac/clinc-business-logic-server-template-python | 6178edb7b3bb368376720fe2baf0babe65a37329 | ["BSD-3-Clause"]
| stars: 1 (2019-11-11T15:40:35.000Z) | issues: 3 (2018-08-06T20:43:38.000Z to 2021-06-10T20:43:52.000Z) | forks: 2 (2018-08-06T18:37:39.000Z to 2019-05-23T13:27:56.000Z)
"""
Create your tests here.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase # pylint: disable=unused-import
# Create your tests here.
| 18fd285c66536a39166462471fd0b885b76751f6 | 31,463 | py | Python
| pynq/lib/video/mipi_rx.py | michalkouril/PYNQ | c72febc2decc83816f40b91a7f60e11fe707c248 | ["BSD-3-Clause"]
| stars: 1 (2021-12-18T09:54:09.000Z) | issues: null | forks: null
# Copyright (c) 2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pynq import DefaultIP
__author__ = "Mario Ruiz"
__copyright__ = "Copyright 2021, Xilinx"
__email__ = "pynq_support@xilinx.com"
_registers = {
'core_configuration': {'address_offset': 0x00, 'access': 'read-write', 'size': 32, 'host_size': 4, 'description': 'The Core Configuration register', 'type': 'uint',
'fields': {
'core_enabled': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Core Enabled'},
'soft_reset': {'access': 'read-write','bit_offset': 1,'bit_width': 1,'description': 'Soft Reset'}
}
},
'protocol_configuration': {'address_offset': 0x04, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Protocol Configuration register', 'type': 'uint',
'fields': {
'active_lanes': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 2, 'description': 'Active Lanes'},
'maximum_lanes': {'access': 'read-write','bit_offset': 3,'bit_width': 2,'description': 'Maximum Lane'}
}
},
'core_status': {'address_offset': 0x10, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Core Status register', 'type': 'uint',
'fields': {
'soft_reset': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 1, 'description': 'Indicates that internal soft reset/core disable activities are in progress'},
'stream_full': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Stream Line buffer Full: indicates the current status of line buffer full condition'},
'shot_packet_fifo_not_empty': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Short packet FIFO not empty: Indicates the current status of short packet FIFO not empty condition'},
'shot_packet_fifo_full': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Short packet FIFO full: Indicates the current status of short packet FIFO full condition'},
'packet_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Counts number of long packets written to the line buffer'}
}
},
'global_interrupt_enable': {'address_offset': 0x20, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Global Interrupt Enable register', 'type': 'uint',
'fields': {
'global_interrupt': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Master enable for the device interrupt output to the system'}
}
},
'interrupt_status': {'address_offset': 0x24, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Interrupt Status register', 'type': 'uint',
'fields': {
'Frame level error for VC0': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Frame level error for VC0'},
'Frame synchronization error for VC0': {'access': 'read-write', 'bit_offset': 1, 'bit_width': 1, 'description': 'Frame synchronization error for VC0'},
'Frame level error for VC1': {'access': 'read-write', 'bit_offset': 2, 'bit_width': 1, 'description': 'Frame level error for VC1'},
'Frame synchronization error for VC1': {'access': 'read-write', 'bit_offset': 3, 'bit_width': 1, 'description': 'Frame synchronization error for VC1'},
'Frame level error for VC2': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Frame level error for VC2'},
'Frame synchronization error for VC2': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'Frame synchronization error for VC2'},
'Frame level error for VC3': {'access': 'read-write', 'bit_offset': 6, 'bit_width': 1, 'description': 'Frame level error for VC3'},
'Frame synchronization error for VC3': {'access': 'read-write', 'bit_offset': 7, 'bit_width': 1, 'description': 'Frame synchronization error for VC3'},
'Unsupported Data Type': {'access': 'read-write', 'bit_offset': 8, 'bit_width': 1, 'description': 'Unsupported Data Type'},
'CRC error': {'access': 'read-write', 'bit_offset': 9, 'bit_width': 1, 'description': 'CRC error'},
'ECC 1-bit error': {'access': 'read-write', 'bit_offset': 10, 'bit_width': 1, 'description': 'ECC 1-bit error'},
'ECC 2-bit error': {'access': 'read-write', 'bit_offset': 11, 'bit_width': 1, 'description': 'ECC 2-bit error'},
'SoT sync error': {'access': 'read-write', 'bit_offset': 12, 'bit_width': 1, 'description': 'SoT sync error'},
'SoT error': {'access': 'read-write', 'bit_offset': 13, 'bit_width': 1, 'description': 'SoT error'},
'stop_state': {'access': 'read-write', 'bit_offset': 17, 'bit_width': 1, 'description': 'Active-High signal indicates that the lane module is currently in Stop state'},
'stream line buffer full ': {'access': 'read-write', 'bit_offset': 18, 'bit_width': 1, 'description': 'Asserts when the line buffer is full'},
'Short packet FIFO not empty': {'access': 'read-write', 'bit_offset': 19, 'bit_width': 1, 'description': 'Active-High signal asserted when short packet FIFO not empty condition detected'},
'Short packet FIFO full': {'access': 'read-write', 'bit_offset': 20, 'bit_width': 1, 'description': 'Active-High signal asserted when the short packet FIFO full condition detected'},
'Incorrect lane configuration': {'access': 'read-write', 'bit_offset': 21, 'bit_width': 1, 'description': 'Asserted when Active lanes is greater than Maximum lanes in the protocol configuration register'},
'Word Count corruption': {'access': 'read-write', 'bit_offset': 22, 'bit_width': 1, 'description': 'Asserted when WC field of packet header corrupted and core receives less bytes than indicated in WC field'},
'UV420 WC Error': {'access': 'read-write', 'bit_offset': 28, 'bit_width': 1, 'description': 'Asserted when the user-configured YUV420 word count value is less than the actual Y-line word count of the incoming data, which results in an internal buffer full condition.'},
'RX_Skewcalhs': {'access': 'read-write', 'bit_offset': 29, 'bit_width': 1, 'description': 'Asserted when rxskewcalhs is detected.'},
'VCX Frame Error': {'access': 'read-only', 'bit_offset': 30, 'bit_width': 1, 'description': 'Asserted when the VCX Frame error is detected'},
'Frame Received': {'access': 'read-write', 'bit_offset': 31, 'bit_width': 1, 'description': 'Asserted when the Frame End (FE) short packet is received for the current frame'}
}
},
'interrupt_enable': {'address_offset': 0x28, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Interrupt Enable register', 'type': 'uint',
'fields': {
'Frame level error for VC0': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame synchronization error for VC0': {'access': 'read-write', 'bit_offset': 1, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame level error for VC1': {'access': 'read-write', 'bit_offset': 2, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame synchronization error for VC1': {'access': 'read-write', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame level error for VC2': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame synchronization error for VC2': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame level error for VC3': {'access': 'read-write', 'bit_offset': 6, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame synchronization error for VC3': {'access': 'read-write', 'bit_offset': 7, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Unsupported Data Type': {'access': 'read-write', 'bit_offset': 8, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'CRC error': {'access': 'read-write', 'bit_offset': 9, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'ECC 1-bit error': {'access': 'read-write', 'bit_offset': 10, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'ECC 2-bit error': {'access': 'read-write', 'bit_offset': 11, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'SoT sync error': {'access': 'read-write', 'bit_offset': 12, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'SoT error': {'access': 'read-write', 'bit_offset': 13, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'stop_state': {'access': 'read-write', 'bit_offset': 17, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'stream line buffer full ': {'access': 'read-write', 'bit_offset': 18, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Short packet FIFO not empty': {'access': 'read-write', 'bit_offset': 19, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Short packet FIFO full': {'access': 'read-write', 'bit_offset': 20, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Incorrect lane configuration': {'access': 'read-write', 'bit_offset': 21, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Word Count corruption': {'access': 'read-write', 'bit_offset': 22, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'UV420 WC Error': {'access': 'read-write', 'bit_offset': 28, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'RX_Skewcalhs': {'access': 'read-write', 'bit_offset': 29, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'VCX Frame Error': {'access': 'read-write', 'bit_offset': 30, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'},
'Frame Received': {'access': 'read-write', 'bit_offset': 31, 'bit_width': 1, 'description': 'Set bits in this register to 1 to generate the required interrupts'}
}
},
'generic_short_packet': {'address_offset': 0x30, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Generic Short Packet register', 'type': 'uint',
'fields': {
'data_type': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 6, 'description': 'Generic short packet code'},
'virtual_channel': {'access': 'read-only', 'bit_offset': 6, 'bit_width': 2, 'description': 'Virtual channel number'},
'data': {'access': 'read-only', 'bit_offset': 8, 'bit_width': 16, 'description': '16-bit short packet data'}
}
},
'vcx_frame_error': {'address_offset': 0x34, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The VCX Frame Error register', 'type': 'uint',
'fields': {
'Frame level error for VC4': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC4': {'access': 'read-write', 'bit_offset': 1, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC5': {'access': 'read-write', 'bit_offset': 2, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC5': {'access': 'read-write', 'bit_offset': 3, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC6': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC6': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC7': {'access': 'read-write', 'bit_offset': 6, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC7': {'access': 'read-write', 'bit_offset': 7, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC8': {'access': 'read-write', 'bit_offset': 8, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC8': {'access': 'read-write', 'bit_offset': 9, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC9': {'access': 'read-write', 'bit_offset': 10, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC9': {'access': 'read-write', 'bit_offset': 11, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC10': {'access': 'read-write', 'bit_offset': 12, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC10': {'access': 'read-write', 'bit_offset': 13, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC11': {'access': 'read-write', 'bit_offset': 14, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC11': {'access': 'read-write', 'bit_offset': 15, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC12': {'access': 'read-write', 'bit_offset': 16, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC12': {'access': 'read-write', 'bit_offset': 17, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC13': {'access': 'read-write', 'bit_offset': 18, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC13': {'access': 'read-write', 'bit_offset': 19, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC14': {'access': 'read-write', 'bit_offset': 20, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC14': {'access': 'read-write', 'bit_offset': 21, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'},
'Frame level error for VC15': {'access': 'read-write', 'bit_offset': 22, 'bit_width': 1, 'description': 'Asserted after an FE when the data payload received between FS and FE contains errors'},
'Frame synchronization error for VC15': {'access': 'read-write', 'bit_offset': 23, 'bit_width': 1, 'description': 'Asserted when an FE is not paired with a Frame Start (FS) on the same virtual channel'}
}
},
'clock_lane_information': {'address_offset': 0x3C, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Clock Lane Information register', 'type': 'uint',
'fields': {
'stop_state': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Stop state on clock lane'}
}
},
'lane_0_information': {'address_offset': 0x40, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Lane0 Information register', 'type': 'uint',
'fields': {
'SoT Sync error': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 1, 'description': 'Detection of SoT Synchronization Error'},
'SoT error': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Detection of SoT Error'},
'skewcalhs': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Indicates the deskew reception'},
'stop_state': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Detection of stop state'}
}
},
'lane_1_information': {'address_offset': 0x44, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Lane1 Information register', 'type': 'uint',
'fields': {
'SoT Sync error': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 1, 'description': 'Detection of SoT Synchronization Error'},
'SoT error': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Detection of SoT Error'},
'skewcalhs': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Indicates the deskew reception'},
'stop_state': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Detection of stop state'}
}
},
'lane_2_information': {'address_offset': 0x48, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Lane2 Information register', 'type': 'uint',
'fields': {
'SoT Sync error': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 1, 'description': 'Detection of SoT Synchronization Error'},
'SoT error': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Detection of SoT Error'},
'skewcalhs': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Indicates the deskew reception'},
'stop_state': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Detection of stop state'}
}
},
'lane_3_information': {'address_offset': 0x4C, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'The Lane3 Information register', 'type': 'uint',
'fields': {
'SoT Sync error': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 1, 'description': 'Detection of SoT Synchronization Error'},
'SoT error': {'access': 'read-only', 'bit_offset': 1, 'bit_width': 1, 'description': 'Detection of SoT Error'},
'skewcalhs': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Indicates the deskew reception'},
'stop_state': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'Detection of stop state'}
}
},
'image_information_0': {'address_offset': 0x60, 'access': 'read-write;', 'size': 64, 'host_size': 8, 'description': 'Image Information 0 register', 'type': 'uint',
'fields': {
'byte_count': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 16, 'description': 'Byte count of current packet being processed by the control FSM'},
'line_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of long packets written to line buffer'},
'data_type': {'access': 'read-only', 'bit_offset': 32, 'bit_width': 6, 'description': 'Data type of the current packet'}
}
},
'image_information_1': {'address_offset': 0x68, 'access': 'read-write;', 'size': 64, 'host_size': 8, 'description': 'Image Information 1 register', 'type': 'uint',
'fields': {
'byte_count': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 16, 'description': 'Byte count of current packet being processed by the control FSM'},
'line_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of long packets written to line buffer'},
'data_type': {'access': 'read-only', 'bit_offset': 32, 'bit_width': 6, 'description': 'Data type of the current packet'}
}
},
'image_information_2': {'address_offset': 0x70, 'access': 'read-write;', 'size': 64, 'host_size': 8, 'description': 'Image Information 2 register', 'type': 'uint',
'fields': {
'byte_count': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 16, 'description': 'Byte count of current packet being processed by the control FSM'},
'line_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of long packets written to line buffer'},
'data_type': {'access': 'read-only', 'bit_offset': 32, 'bit_width': 6, 'description': 'Data type of the current packet'}
}
},
'image_information_3': {'address_offset': 0x78, 'access': 'read-write;', 'size': 64, 'host_size': 8, 'description': 'Image Information 3 register', 'type': 'uint',
'fields': {
'byte_count': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 16, 'description': 'Byte count of current packet being processed by the control FSM'},
'line_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of long packets written to line buffer'},
'data_type': {'access': 'read-only', 'bit_offset': 32, 'bit_width': 6, 'description': 'Data type of the current packet'}
}
},
'dphy_control': {'address_offset': 0x1000, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'Enable and soft reset control for PHY', 'type': 'uint',
'fields': {
'srst': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 1, 'description': 'Soft reset for D-PHY Controller'},
'dphy_en': {'access': 'read-write', 'bit_offset': 1, 'bit_width': 1, 'description': 'D-PHY Enabled'}
}
},
'dphy_idelay_tap': {'address_offset': 0x1004, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'Calibration of IDELAY in 7 series D-PHY RX configuration for lanes 0 to 3', 'type': 'uint',
'fields': {
'tap_lane0': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 5, 'description': 'Tap value for lane 0'},
'tap_lane1': {'access': 'read-write', 'bit_offset': 8, 'bit_width': 5, 'description': 'Tap value for lane 1'},
'tap_lane2': {'access': 'read-write', 'bit_offset': 16, 'bit_width': 5, 'description': 'Tap value for lane 2'},
'tap_lane3': {'access': 'read-write', 'bit_offset': 24, 'bit_width': 5, 'description': 'Tap value for lane 3'}
}
},
'dphy_init': {'address_offset': 0x1008, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'Initialization timer', 'type': 'uint'},
'dphy_hs_timeout': {'address_offset': 0x1010, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'Watchdog timeout in high-speed mode', 'type': 'uint'},
'dphy_esc_timeout': {'address_offset': 0x1014, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'ESC timeout', 'type': 'uint'},
'dphy_cl_status': {'address_offset': 0x1018, 'access': 'read-only;', 'size': 32, 'host_size': 4, 'description': 'Status register for PHY error reporting for clock Lane', 'type': 'uint',
'fields': {
'mode': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 2, 'description': 'Mode'},
'ulps': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'ULP State mode'},
'init_done': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set after the lane has completed initialization'},
'stop_state': {'access': 'read-only', 'bit_offset': 4, 'bit_width': 1, 'description': 'Clock lane is in the Stop state'},
'err_control': {'access': 'read-only', 'bit_offset': 5, 'bit_width': 1, 'description': 'Clock lane control error'}
}
},
'dphy_dl0_status': {'address_offset': 0x101C, 'access': 'read-only;', 'size': 32, 'host_size': 4, 'description': 'Status register for PHY error reporting for data lane 0', 'type': 'uint',
'fields': {
'mode': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 2, 'description': 'Mode'},
'ulps': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'ULP State mode'},
'init_done': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set after the lane has completed initialization'},
'hs_abort': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Set after the Data Lane High-Speed Timeout'},
'esc_abort': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'This bit is set after the Data Lane Escape Timeout'},
'stop_state': {'access': 'read-only', 'bit_offset': 6, 'bit_width': 1, 'description': 'Data lane is in the Stop state'},
'pkt_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of packets received or transmitted on the data lane'}
}
},
'dphy_dl1_status': {'address_offset': 0x1020, 'access': 'read-only;', 'size': 32, 'host_size': 4, 'description': 'Status register for PHY error reporting for data lane 1', 'type': 'uint',
'fields': {
'mode': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 2, 'description': 'Mode'},
'ulps': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'ULP State mode'},
'init_done': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set after the lane has completed initialization'},
'hs_abort': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Set after the Data Lane High-Speed Timeout'},
'esc_abort': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'This bit is set after the Data Lane Escape Timeout'},
'stop_state': {'access': 'read-only', 'bit_offset': 6, 'bit_width': 1, 'description': 'Data lane is in the Stop state'},
'pkt_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of packets received or transmitted on the data lane'}
}
},
'dphy_dl2_status': {'address_offset': 0x1024, 'access': 'read-only;', 'size': 32, 'host_size': 4, 'description': 'Status register for PHY error reporting for data lane 2', 'type': 'uint',
'fields': {
'mode': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 2, 'description': 'Mode'},
'ulps': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'ULP State mode'},
'init_done': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set after the lane has completed initialization'},
'hs_abort': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Set after the Data Lane High-Speed Timeout'},
'esc_abort': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'This bit is set after the Data Lane Escape Timeout'},
'stop_state': {'access': 'read-only', 'bit_offset': 6, 'bit_width': 1, 'description': 'Data lane is in the Stop state'},
'pkt_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of packets received or transmitted on the data lane'}
}
},
'dphy_dl3_status': {'address_offset': 0x1028, 'access': 'read-only;', 'size': 32, 'host_size': 4, 'description': 'Status register for PHY error reporting for data lane 3', 'type': 'uint',
'fields': {
'mode': {'access': 'read-only', 'bit_offset': 0, 'bit_width': 2, 'description': 'Mode'},
'ulps': {'access': 'read-only', 'bit_offset': 2, 'bit_width': 1, 'description': 'ULP State mode'},
'init_done': {'access': 'read-only', 'bit_offset': 3, 'bit_width': 1, 'description': 'Set after the lane has completed initialization'},
'hs_abort': {'access': 'read-write', 'bit_offset': 4, 'bit_width': 1, 'description': 'Set after the Data Lane High-Speed Timeout'},
'esc_abort': {'access': 'read-write', 'bit_offset': 5, 'bit_width': 1, 'description': 'This bit is set after the Data Lane Escape Timeout'},
'stop_state': {'access': 'read-only', 'bit_offset': 6, 'bit_width': 1, 'description': 'Data lane is in the Stop state'},
'pkt_count': {'access': 'read-only', 'bit_offset': 16, 'bit_width': 16, 'description': 'Number of packets received or transmitted on the data lane'}
}
},
'dphy_hs_settle0': {'address_offset': 0x1030, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'HS_SETTLE timing control for lane 0', 'type': 'uint',
'fields': {
'hs_settle_ns': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 9, 'description': 'HS_SETTLE timing parameter'}
}
},
'dphy_hs_settle1': {'address_offset': 0x1048, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'HS_SETTLE timing control for lane 1', 'type': 'uint',
'fields': {
'hs_settle_ns': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 9, 'description': 'HS_SETTLE timing parameter'}
}
},
'dphy_hs_settle2': {'address_offset': 0x104C, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'HS_SETTLE timing control for lane 2', 'type': 'uint',
'fields': {
'hs_settle_ns': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 9, 'description': 'HS_SETTLE timing parameter'}
}
},
'dphy_hs_settle3': {'address_offset': 0x1050, 'access': 'read-write;', 'size': 32, 'host_size': 4, 'description': 'HS_SETTLE timing control for lane 3', 'type': 'uint',
'fields': {
'hs_settle_ns': {'access': 'read-write', 'bit_offset': 0, 'bit_width': 9, 'description': 'HS_SETTLE timing parameter'}
}
}
}
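def _field_value(reg_value, field):
    """Illustrative helper (not part of the original file): extract one
    bit-field from a raw register read, following the bit_offset/bit_width
    convention used in _registers above. pynq's RegisterMap performs the
    equivalent shift-and-mask internally."""
    mask = (1 << field['bit_width']) - 1
    return (reg_value >> field['bit_offset']) & mask
# Example: _field_value(raw, _registers['core_status']['fields']['packet_count'])
# returns bits 31:16 of the Core Status register.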
class MipiRx(DefaultIP):
"""Driver for MIPI CSI-2 Receiver Subsystem"""
bindto = ['xilinx.com:ip:mipi_csi2_rx_subsystem:5.1']
def __init__(self, description):
description['registers'] = _registers
super().__init__(description)
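# Usage sketch (assumes an overlay that instantiates the subsystem; the
# attribute name 'mipi_csi2_rx_subsyst_0' is hypothetical):
#
#     from pynq import Overlay
#     ol = Overlay('base.bit')
#     mipi = ol.mipi_csi2_rx_subsyst_0      # matched to MipiRx via `bindto`
#     mipi.register_map.core_configuration.core_enabled = 1
#     status = mipi.register_map.core_status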
| 7a0a5a6acbd8ed8099c5ce1629a32553bf52d632 | 12,218 | py | Python
| tests/test_Bioneuron.py | MarcJavin/ODYNN (stars) / MarcusJP/ODYNN (issues, forks) | 8187b8db71c8621d17bce94749aecc0a81f11e10 | ["MIT"]
| stars: 1 (2019-01-08T15:55:29.000Z) | issues: 7 (2019-12-16T21:03:57.000Z to 2022-02-10T00:01:22.000Z) | forks: 2 (2019-11-23T10:21:09.000Z to 2019-12-13T16:02:46.000Z)
from unittest import TestCase
from odynn.neuron import BioNeuronTf, PyBioNeuron
from odynn import utils
from odynn.models import cfg_model
import numpy as np
import pickle
import tensorflow as tf

p = PyBioNeuron.default_params


class TestNeuronTf(TestCase):

    dir = utils.set_dir('unittest')

    def test_init(self):
        # A list of parameter dicts creates one neuron per dict.
        hh = BioNeuronTf(init_p=[p for _ in range(10)])
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (10,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        # A dict mapping each variable to a list of values behaves the same way.
        hh = BioNeuronTf(init_p={var: [val for _ in range(10)] for var, val in p.items()})
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (10,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        hh = BioNeuronTf(init_p=[PyBioNeuron.get_random() for _ in range(13)])
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 13)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (13,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        # A single parameter dict creates a single neuron.
        hh = BioNeuronTf(p)
        self.assertEqual(hh.num, 1)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state),))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(hh.init_params, p)
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        # n_rand draws that many random parameter sets.
        hh = BioNeuronTf(n_rand=15)
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 15)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (15,))
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))

    def test_init_groups(self):
        # Neurons sharing a group index share the same trainable parameters.
        hh = BioNeuronTf(n_rand=15, groups=np.zeros(15, dtype=np.int32))
        hh.reset()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        pars = sess.run(hh._param['C_m'])
        for i in range(1, 15):
            self.assertEqual(pars[i - 1], pars[i])
        hh = BioNeuronTf(n_rand=6, groups=[1, 0, 0, 0, 2, 2])
        hh = BioNeuronTf(n_rand=3, groups=[1, 0, 0, 0, 2, 2])
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        hh.reset()
        loss = tf.square(hh._param['C_m'] - [1., 20., 80., -10., -7., 90.])
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        pars = sess.run(hh._param['C_m'])
        self.assertEqual(pars[1], pars[2])
        self.assertEqual(pars[1], pars[3])
        self.assertEqual(pars[4], pars[5])
        # Group members must remain equal even after an optimizer step.
        sess.run(train)
        pars = sess.run(hh._param['C_m'])
        self.assertEqual(pars[1], pars[2])
        self.assertEqual(pars[1], pars[3])
        self.assertEqual(pars[4], pars[5])
        # Fewer random parameter sets than distinct group indices is invalid.
        with self.assertRaises(ValueError):
            hh = BioNeuronTf(n_rand=2, groups=[1, 0, 0, 0, 2, 2])

    def test_pickle(self):
        hh1 = BioNeuronTf(init_p=[p for _ in range(10)])
        with open(self.dir + 'yeee', 'wb') as f:
            pickle.dump(hh1, f)
        with open(self.dir + 'yeee', 'rb') as f:
            hh = pickle.load(f)
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (10,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        self.assertEqual(hh1.parameter_names, hh.parameter_names)
        hh = BioNeuronTf(init_p={var: [val for _ in range(10)] for var, val in p.items()})
        with open(self.dir + 'yeee', 'wb') as f:
            pickle.dump(hh, f)
        with open(self.dir + 'yeee', 'rb') as f:
            hh = pickle.load(f)
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (10,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        hh = BioNeuronTf(init_p=[PyBioNeuron.get_random() for _ in range(13)])
        with open(self.dir + 'yeee', 'wb') as f:
            pickle.dump(hh, f)
        with open(self.dir + 'yeee', 'rb') as f:
            hh = pickle.load(f)
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 13)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (13,))
        self.assertEqual(hh.init_params.keys(), p.keys())
        hh = BioNeuronTf(n_rand=15)
        with open(self.dir + 'yeee', 'wb') as f:
            pickle.dump(hh, f)
        with open(self.dir + 'yeee', 'rb') as f:
            hh = pickle.load(f)
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 15)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(list(hh.init_params.values())[0].shape, (15,))
        hh = BioNeuronTf(p)
        with open(self.dir + 'yeee', 'wb') as f:
            pickle.dump(hh, f)
        with open(self.dir + 'yeee', 'rb') as f:
            hh = pickle.load(f)
        self.assertEqual(hh.num, 1)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state),))
        self.assertIsInstance(hh.init_params, dict)
        self.assertEqual(hh.init_params, p)

    def test_parallelize(self):
        n = BioNeuronTf(init_p=p)
        sh = list(n._init_state.shape)
        n.parallelize(10)
        sh.append(10)
        shp = (10,)
        self.assertEqual(n._init_state.shape, tuple(sh))
        self.assertEqual(list(n.init_params.values())[0].shape, tuple(shp))
        n = BioNeuronTf(init_p=[p for _ in range(8)])
        sh = list(n._init_state.shape)
        shp = list(list(n.init_params.values())[0].shape)
        n.parallelize(11)
        sh.append(11)
        shp.append(11)
        self.assertEqual(n._init_state.shape, tuple(sh))
        self.assertEqual(list(n.init_params.values())[0].shape, tuple(shp))

    def test_build_graph(self):
        n = BioNeuronTf(init_p=p)
        nn = BioNeuronTf(init_p=[p for _ in range(8)])
        i, res = n.build_graph()
        self.assertEqual(i.get_shape().as_list(), [None])
        i, res = n.build_graph(3)
        self.assertEqual(i.get_shape().as_list(), [None, None])
        i, res = n.build_graph(batch=1)
        self.assertEqual(i.get_shape().as_list(), [None, None])
        i, res = nn.build_graph()
        self.assertEqual(i.get_shape().as_list(), [None, 8])
        i, res = nn.build_graph(1)
        self.assertEqual(i.get_shape().as_list(), [None, None, 8])
        i, res = nn.build_graph(batch=4)
        self.assertEqual(i.get_shape().as_list(), [None, None, 8])

    def test_calculate(self):
        n = BioNeuronTf(init_p=p)
        nn = BioNeuronTf(init_p=[p for _ in range(8)])
        i = np.array([2., 3., 0.])
        ii = np.array([[2., 2.], [3., 3.], [0., 0.]])
        x = n.calculate(i)
        self.assertEqual(n._init_state.shape[0], x.shape[1])
        self.assertEqual(x.shape[0], len(i))
        x = n.calculate(ii)
        self.assertEqual(ii.shape[1], x.shape[2])
        self.assertEqual(x.shape[0], ii.shape[0])  # same time
        self.assertEqual(x.shape[1], n._init_state.shape[0])
        self.assertEqual(x.shape[2], ii.shape[1])  # same nb of batch
        x = nn.calculate(i)  # several neurons, one batch
        self.assertEqual(x.shape[-1], nn.num)
        self.assertEqual(x.shape[0], len(i))
        self.assertEqual(x.shape[1], nn._init_state.shape[0])
        xx2 = nn.calculate(ii)  # several neurons, several batches
        xx = nn.calculate(np.stack([ii for _ in range(8)], axis=ii.ndim))  # several neurons, several batches
        self.assertEqual(xx.shape[-1], nn.num)
        self.assertEqual(xx.shape[0], ii.shape[0])  # same time
        self.assertEqual(xx.shape[1], nn._init_state.shape[0])
        self.assertEqual(xx.shape[2], ii.shape[1])  # same nb of batch
        self.assertEqual(xx.all(), xx2.all())


class TestNeuronFix(TestCase):

    def test_init(self):
        hh = PyBioNeuron(init_p=[p for _ in range(10)])
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh._param, dict)
        self.assertEqual(list(hh._param.values())[0].shape, (10,))
        self.assertEqual(hh._param.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        hh = PyBioNeuron(init_p={var: np.array([val for _ in range(10)]) for var, val in p.items()})
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 10)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh._param, dict)
        self.assertEqual(list(hh._param.values())[0].shape, (10,))
        self.assertEqual(hh._param.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        hh = PyBioNeuron(init_p=[PyBioNeuron.get_random() for _ in range(13)])
        self.assertEqual(len(hh._init_state), len(hh.default_init_state))
        self.assertEqual(hh.num, 13)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state), hh.num))
        self.assertIsInstance(hh._param, dict)
        self.assertEqual(list(hh._param.values())[0].shape, (13,))
        self.assertEqual(hh._param.keys(), p.keys())
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))
        hh = PyBioNeuron(p)
        self.assertEqual(hh.num, 1)
        self.assertEqual(hh._init_state.shape, (len(hh.default_init_state),))
        self.assertIsInstance(hh._param, dict)
        self.assertEqual(hh._param, p)
        self.assertEqual(hh.parameter_names, list(hh.default_params.keys()))

    def test_step(self):
        hh = PyBioNeuron(p)
        x = hh.step(hh.init_state, 2.)
        self.assertEqual(hh._init_state.shape, x.shape)
        hh = PyBioNeuron(init_p=[PyBioNeuron.get_random() for _ in range(13)])
        x = hh.step(hh.init_state, 2.)
        self.assertEqual(hh._init_state.shape, x.shape)

    def test_calculate(self):
        hh = PyBioNeuron(p)
        i = [2., 3., 0.]
        x = hh.calculate(i)
        self.assertEqual(hh._init_state.shape, x[-1].shape)
        self.assertEqual(x.shape[0], len(i))
        i = np.array([[2., 2.], [3., 3.], [0., 0.]])
        x = hh.calculate(i)
        self.assertEqual(i.shape[1], x[-1].shape[1])
        self.assertEqual(x.shape[0], i.shape[0])  # same time
        self.assertEqual(x.shape[1], hh._init_state.shape[0])
        self.assertEqual(x.shape[2], i.shape[1])  # same nb of batch
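# Note: the tests above rely on TensorFlow 1.x graph-mode APIs (tf.Session,
# tf.global_variables_initializer, tf.train.AdamOptimizer). A minimal sketch,
# assuming only the standard compat layer, for running them under
# TensorFlow 2.x is to swap the tensorflow import at the top of the file for:
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()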
| 44.591241 | 109 | 0.627762 | 1,764 | 12,218 | 4.193878 | 0.074263 | 0.227088 | 0.12179 | 0.054069 | 0.853609 | 0.842931 | 0.808327 | 0.760611 | 0.751419 | 0.715328 | 0 | 0.022987 | 0.213128 | 12,218 | 273 | 110 | 44.754579 | 0.746516 | 0.013832 | 0 | 0.676596 | 0 | 0 | 0.006645 | 0 | 0 | 0 | 0 | 0 | 0.540426 | 1 | 0.038298 | false | 0 | 0.029787 | 0 | 0.080851 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
e15b69685093d24661b970121a1df289215a20f1 | 33,381 | py | Python | sdk/python/pulumi_azure/apimanagement/subscription.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/apimanagement/subscription.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/apimanagement/subscription.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities

__all__ = ['SubscriptionArgs', 'Subscription']


@pulumi.input_type
class SubscriptionArgs:
    def __init__(__self__, *,
                 api_management_name: pulumi.Input[str],
                 display_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 allow_tracing: Optional[pulumi.Input[bool]] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Subscription resource.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] display_name: The display name of this Subscription.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] allow_tracing: Determines whether tracing can be enabled. Defaults to `true`.
        :param pulumi.Input[str] api_id: The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] primary_key: The primary subscription key to use for the subscription.
        :param pulumi.Input[str] product_id: The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_key: The secondary subscription key to use for the subscription.
        :param pulumi.Input[str] state: The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        :param pulumi.Input[str] subscription_id: An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_id: The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "api_management_name", api_management_name)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if allow_tracing is not None:
            pulumi.set(__self__, "allow_tracing", allow_tracing)
        if api_id is not None:
            pulumi.set(__self__, "api_id", api_id)
        if primary_key is not None:
            pulumi.set(__self__, "primary_key", primary_key)
        if product_id is not None:
            pulumi.set(__self__, "product_id", product_id)
        if secondary_key is not None:
            pulumi.set(__self__, "secondary_key", secondary_key)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if user_id is not None:
            pulumi.set(__self__, "user_id", user_id)

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> pulumi.Input[str]:
        """
        The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_management_name")

    @api_management_name.setter
    def api_management_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_management_name", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Input[str]:
        """
        The display name of this Subscription.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="allowTracing")
    def allow_tracing(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether tracing can be enabled. Defaults to `true`.
        """
        return pulumi.get(self, "allow_tracing")

    @allow_tracing.setter
    def allow_tracing(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_tracing", value)

    @property
    @pulumi.getter(name="apiId")
    def api_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_id")

    @api_id.setter
    def api_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_id", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The primary subscription key to use for the subscription.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_key", value)

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "product_id")

    @product_id.setter
    def product_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "product_id", value)

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The secondary subscription key to use for the subscription.
        """
        return pulumi.get(self, "secondary_key")

    @secondary_key.setter
    def secondary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_key", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_id", value)

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "user_id")

    @user_id.setter
    def user_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_id", value)


@pulumi.input_type
class _SubscriptionState:
    def __init__(__self__, *,
                 allow_tracing: Optional[pulumi.Input[bool]] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 api_management_name: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Subscription resources.
        :param pulumi.Input[bool] allow_tracing: Determines whether tracing can be enabled. Defaults to `true`.
        :param pulumi.Input[str] api_id: The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] display_name: The display name of this Subscription.
        :param pulumi.Input[str] primary_key: The primary subscription key to use for the subscription.
        :param pulumi.Input[str] product_id: The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_key: The secondary subscription key to use for the subscription.
        :param pulumi.Input[str] state: The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        :param pulumi.Input[str] subscription_id: An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_id: The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        if allow_tracing is not None:
            pulumi.set(__self__, "allow_tracing", allow_tracing)
        if api_id is not None:
            pulumi.set(__self__, "api_id", api_id)
        if api_management_name is not None:
            pulumi.set(__self__, "api_management_name", api_management_name)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if primary_key is not None:
            pulumi.set(__self__, "primary_key", primary_key)
        if product_id is not None:
            pulumi.set(__self__, "product_id", product_id)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if secondary_key is not None:
            pulumi.set(__self__, "secondary_key", secondary_key)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if user_id is not None:
            pulumi.set(__self__, "user_id", user_id)

    @property
    @pulumi.getter(name="allowTracing")
    def allow_tracing(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether tracing can be enabled. Defaults to `true`.
        """
        return pulumi.get(self, "allow_tracing")

    @allow_tracing.setter
    def allow_tracing(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_tracing", value)

    @property
    @pulumi.getter(name="apiId")
    def api_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_id")

    @api_id.setter
    def api_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_id", value)

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_management_name")

    @api_management_name.setter
    def api_management_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_management_name", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of this Subscription.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The primary subscription key to use for the subscription.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_key", value)

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "product_id")

    @product_id.setter
    def product_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "product_id", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The secondary subscription key to use for the subscription.
        """
        return pulumi.get(self, "secondary_key")

    @secondary_key.setter
    def secondary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_key", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_id", value)

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "user_id")

    @user_id.setter
    def user_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_id", value)


class Subscription(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allow_tracing: Optional[pulumi.Input[bool]] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 api_management_name: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Subscription within an API Management Service.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_service = azure.apimanagement.get_service(name="example-apim",
            resource_group_name="example-resources")
        example_product = azure.apimanagement.get_product(product_id="00000000-0000-0000-0000-000000000000",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name)
        example_user = azure.apimanagement.get_user(user_id="11111111-1111-1111-1111-111111111111",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name)
        example_subscription = azure.apimanagement.Subscription("exampleSubscription",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name,
            user_id=example_user.id,
            product_id=example_product.id,
            display_name="Parser API")
        ```

        ## Import

        API Management Subscriptions can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:apimanagement/subscription:Subscription example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.ApiManagement/service/example-apim/subscriptions/subscription-name
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_tracing: Determines whether tracing can be enabled. Defaults to `true`.
        :param pulumi.Input[str] api_id: The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] display_name: The display name of this Subscription.
        :param pulumi.Input[str] primary_key: The primary subscription key to use for the subscription.
        :param pulumi.Input[str] product_id: The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_key: The secondary subscription key to use for the subscription.
        :param pulumi.Input[str] state: The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        :param pulumi.Input[str] subscription_id: An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_id: The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SubscriptionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Subscription within an API Management Service.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_service = azure.apimanagement.get_service(name="example-apim",
            resource_group_name="example-resources")
        example_product = azure.apimanagement.get_product(product_id="00000000-0000-0000-0000-000000000000",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name)
        example_user = azure.apimanagement.get_user(user_id="11111111-1111-1111-1111-111111111111",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name)
        example_subscription = azure.apimanagement.Subscription("exampleSubscription",
            api_management_name=example_service.name,
            resource_group_name=example_service.resource_group_name,
            user_id=example_user.id,
            product_id=example_product.id,
            display_name="Parser API")
        ```

        ## Import

        API Management Subscriptions can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:apimanagement/subscription:Subscription example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.ApiManagement/service/example-apim/subscriptions/subscription-name
        ```

        :param str resource_name: The name of the resource.
        :param SubscriptionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allow_tracing: Optional[pulumi.Input[bool]] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 api_management_name: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 product_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SubscriptionArgs.__new__(SubscriptionArgs)

            __props__.__dict__["allow_tracing"] = allow_tracing
            __props__.__dict__["api_id"] = api_id
            if api_management_name is None and not opts.urn:
                raise TypeError("Missing required property 'api_management_name'")
            __props__.__dict__["api_management_name"] = api_management_name
            if display_name is None and not opts.urn:
                raise TypeError("Missing required property 'display_name'")
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["primary_key"] = primary_key
            __props__.__dict__["product_id"] = product_id
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["secondary_key"] = secondary_key
            __props__.__dict__["state"] = state
            __props__.__dict__["subscription_id"] = subscription_id
            __props__.__dict__["user_id"] = user_id
        super(Subscription, __self__).__init__(
            'azure:apimanagement/subscription:Subscription',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            allow_tracing: Optional[pulumi.Input[bool]] = None,
            api_id: Optional[pulumi.Input[str]] = None,
            api_management_name: Optional[pulumi.Input[str]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            primary_key: Optional[pulumi.Input[str]] = None,
            product_id: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            secondary_key: Optional[pulumi.Input[str]] = None,
            state: Optional[pulumi.Input[str]] = None,
            subscription_id: Optional[pulumi.Input[str]] = None,
            user_id: Optional[pulumi.Input[str]] = None) -> 'Subscription':
        """
        Get an existing Subscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_tracing: Determines whether tracing can be enabled. Defaults to `true`.
        :param pulumi.Input[str] api_id: The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] api_management_name: The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] display_name: The display name of this Subscription.
        :param pulumi.Input[str] primary_key: The primary subscription key to use for the subscription.
        :param pulumi.Input[str] product_id: The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_key: The secondary subscription key to use for the subscription.
        :param pulumi.Input[str] state: The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        :param pulumi.Input[str] subscription_id: An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        :param pulumi.Input[str] user_id: The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _SubscriptionState.__new__(_SubscriptionState)

        __props__.__dict__["allow_tracing"] = allow_tracing
        __props__.__dict__["api_id"] = api_id
        __props__.__dict__["api_management_name"] = api_management_name
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["primary_key"] = primary_key
        __props__.__dict__["product_id"] = product_id
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["secondary_key"] = secondary_key
        __props__.__dict__["state"] = state
        __props__.__dict__["subscription_id"] = subscription_id
        __props__.__dict__["user_id"] = user_id
        return Subscription(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="allowTracing")
    def allow_tracing(self) -> pulumi.Output[Optional[bool]]:
        """
        Determines whether tracing can be enabled. Defaults to `true`.
        """
        return pulumi.get(self, "allow_tracing")

    @property
    @pulumi.getter(name="apiId")
    def api_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of the API which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_id")

    @property
    @pulumi.getter(name="apiManagementName")
    def api_management_name(self) -> pulumi.Output[str]:
        """
        The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "api_management_name")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        The display name of this Subscription.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> pulumi.Output[str]:
        """
        The primary subscription key to use for the subscription.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter(name="productId")
    def product_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of the Product which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "product_id")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> pulumi.Output[str]:
        """
        The secondary subscription key to use for the subscription.
        """
        return pulumi.get(self, "secondary_key")

    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[Optional[str]]:
        """
        The state of this Subscription. Possible values are `active`, `cancelled`, `expired`, `rejected`, `submitted` and `suspended`. Defaults to `submitted`.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> pulumi.Output[str]:
        """
        An identifier which should be used as the ID of this Subscription. If not specified, a new Subscription ID will be generated. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subscription_id")

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of the User which should be assigned to this Subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "user_id")
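# A minimal, hypothetical sketch of adopting an existing subscription through
# the `Subscription.get` accessor defined above (the resource name and the
# provider ID below are placeholders, not values taken from any real stack):
#
#     imported = Subscription.get(
#         "imported-subscription",
#         id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.ApiManagement/service/example-apim/subscriptions/subscription-name")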
| 49.017621 | 250 | 0.668824 | 4,157 | 33,381 | 5.161896 | 0.047149 | 0.075357 | 0.086122 | 0.083046 | 0.923292 | 0.913692 | 0.907867 | 0.899571 | 0.894958 | 0.872821 | 0 | 0.00757 | 0.236212 | 33,381 | 680 | 251 | 49.089706 | 0.834052 | 0.399958 | 0 | 0.802111 | 1 | 0 | 0.102758 | 0.004743 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163588 | false | 0.002639 | 0.013193 | 0 | 0.274406 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
e1619b0d7a3f2c578df87490737f6bdf3ac5678e | 3,315 | py | Python | test/rxd/3d/test_pure_diffusion_3d.py | niltonlk/nrn | 464541abbf72fe58de77b16bf0e1df425a280b89 | ["BSD-3-Clause"] | 203 | 2018-05-03T11:02:11.000Z | 2022-03-31T14:18:31.000Z | test/rxd/3d/test_pure_diffusion_3d.py | niltonlk/nrn | 464541abbf72fe58de77b16bf0e1df425a280b89 | ["BSD-3-Clause"] | 1,228 | 2018-04-25T09:00:48.000Z | 2022-03-31T21:42:21.000Z | test/rxd/3d/test_pure_diffusion_3d.py | niltonlk/nrn | 464541abbf72fe58de77b16bf0e1df425a280b89 | ["BSD-3-Clause"] | 134 | 2018-04-23T09:14:13.000Z | 2022-03-16T08:57:11.000Z |
import pytest
import numpy

from testutils import compare_data, tol


@pytest.fixture
def ics_pure_diffusion(neuron_instance):
    """A model using intracellular diffusion in a single section in 3D"""
    h, rxd, data, save_path = neuron_instance
    dend = h.Section(name="dend")
    dend.diam = 2
    dend.nseg = 11
    dend.L = 5
    rxd.set_solve_type(dimension=3)
    diff_constant = 1
    r = rxd.Region(h.allsec(), dx=0.75)
    ca = rxd.Species(
        r, d=diff_constant, initial=lambda node: 1 if 0.4 < node.x < 0.6 else 0
    )
    model = (dend, r, ca)
    yield (neuron_instance, model)


def test_pure_diffusion_3d(ics_pure_diffusion):
    """Test ics_pure_diffusion with fixed step methods"""
    neuron_instance, model = ics_pure_diffusion
    h, rxd, data, save_path = neuron_instance
    dend, r, ca = model
    h.dt *= 50
    h.finitialize(-65)
    loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    h.continuerun(125)
    loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    if not save_path:
        assert loss < tol
    max_err = compare_data(data)
    if not save_path:
        assert max_err < tol


def test_pure_diffusion_3d_cvode(ics_pure_diffusion):
    """Test ics_pure_diffusion with variable step methods"""
    neuron_instance, model = ics_pure_diffusion
    h, rxd, data, save_path = neuron_instance
    dend, r, ca = model
    h.CVode().active(True)
    vec = h.Vector()
    h.CVode().states(vec)
    h.finitialize(-65)
    loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    h.continuerun(125)
    loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    if not save_path:
        assert loss < tol
    max_err = compare_data(data)
    if not save_path:
        assert max_err < tol


def test_pure_diffusion_3d_inhom(ics_pure_diffusion):
    """Test ics_pure_diffusion with fixed step methods and inhomogeneous
    diffusion coefficients.
    """
    neuron_instance, model = ics_pure_diffusion
    h, rxd, data, save_path = neuron_instance
    dend, r, ca = model
    h.dt *= 50
    for nd in ca.nodes:
        if nd.x >= 0.5:
            nd.d = 0
    h.finitialize(-65)
    loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    h.continuerun(125)
    loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    if not save_path:
        assert loss < tol
    max_err = compare_data(data)
    if not save_path:
        assert max_err < tol


def test_pure_diffusion_3d_inhom_cvode(ics_pure_diffusion):
    """Test ics_pure_diffusion with variable step methods and inhomogeneous
    diffusion coefficients.
    """
    neuron_instance, model = ics_pure_diffusion
    h, rxd, data, save_path = neuron_instance
    dend, r, ca = model
    h.CVode().active(True)
    for nd in ca.nodes:
        if nd.x >= 0.5:
            nd.d = 0
    h.finitialize(-65)
    loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    h.continuerun(125)
    loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
    if not save_path:
        assert loss < tol
    max_err = compare_data(data)
    if not save_path:
        assert max_err < tol
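# The `loss` bookkeeping in the tests above is a mass-conservation check: the
# total amount of species is sum(concentration * volume) over all nodes,
# sampled before and after the run, and pure diffusion should leave it
# unchanged up to `tol`. A minimal standalone sketch of the same invariant
# (`conc_before`, `conc_after`, and `volumes` are hypothetical arrays):
#
#     before = (numpy.array(conc_before) * numpy.array(volumes)).sum()
#     after = (numpy.array(conc_after) * numpy.array(volumes)).sum()
#     assert after - before < tol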
| 31.571429 | 86 | 0.664253 | 486 | 3,315 | 4.360082 | 0.195473 | 0.059462 | 0.090609 | 0.128362 | 0.822086 | 0.811704 | 0.811704 | 0.811704 | 0.795658 | 0.795658 | 0 | 0.01932 | 0.219306 | 3,315 | 104 | 87 | 31.875 | 0.799459 | 0.104072 | 0 | 0.719512 | 0 | 0 | 0.001367 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.060976 | false | 0 | 0.036585 | 0 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e17d2802183376306f36b59eb0e1fe33bfe1515b | 695 | py | Python | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/_api/v1/data/__init__.py | JustinACoder/H22-GR3-UnrealAI | 361eb9ef1147f8a2991e5f98c4118cd823184adf | ["MIT"] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/_api/v1/data/__init__.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/tensorflow/_api/v1/data/__init__.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | ["bzip2-1.0.6"] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""`tf.data.Dataset` API for input pipelines.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
"""
from __future__ import print_function
from tensorflow._api.v1.data import experimental
from tensorflow.python.data import Dataset
from tensorflow.python.data import FixedLengthRecordDataset
from tensorflow.python.data import Iterator
from tensorflow.python.data import TFRecordDataset
from tensorflow.python.data import TextLineDataset
from tensorflow.python.data.ops.dataset_ops import Options
del print_function
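# A minimal, hypothetical input pipeline built from the symbols re-exported
# above, using the TF 1.x graph-mode iteration style this API targets:
#
#     import tensorflow as tf
#     dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]).batch(2)
#     iterator = dataset.make_one_shot_iterator()
#     next_batch = iterator.get_next()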
| 34.75 | 83 | 0.805755 | 92 | 695 | 5.978261 | 0.48913 | 0.203636 | 0.218182 | 0.261818 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001637 | 0.120863 | 695 | 19 | 84 | 36.578947 | 0.898527 | 0.355396 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.888889 | 0 | 0.888889 | 0.222222 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |