hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
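
Taken together, this listing is the column schema of a source-code dataset dump: per-file Git metadata (hexsha, repository, licenses, star/issue/fork event timestamps), the file content, simple line statistics, and two banks of qsc_* quality signals (a float64 *_quality_signal column plus a raw counterpart, mostly int64, for each measure). A minimal inspection sketch, assuming the rows are stored as Parquet at the hypothetical path data.parquet; the file name and storage format are assumptions, not part of the dump:

    # Hypothetical inspection of a dump with the schema above.
    import pandas as pd

    df = pd.read_parquet("data.parquet")  # assumed path and format
    print(df.dtypes)  # should mirror the field: dtype listing above

    # Example filter: Python files that a few of the quality signals mark
    # as non-generated and not dominated by duplicated 10-grams.
    mask = (
        (df["lang"] == "Python")
        & (df["qsc_code_cate_autogen_quality_signal"] == 0)
        & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
    )
    print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]])
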

hexsha: cb5bc831fa8044e884fd9580ba3c3a2a5498174b
size: 2315
ext: py
lang: Python
max_stars_repo_path: example.py
max_stars_repo_name: sumitsj/workflow-core
max_stars_repo_head_hexsha: 0e19beb744778e579d310d8933b7e195ad7d8c58
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: example.py
max_issues_repo_name: sumitsj/workflow-core
max_issues_repo_head_hexsha: 0e19beb744778e579d310d8933b7e195ad7d8c58
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: example.py
max_forks_repo_name: sumitsj/workflow-core
max_forks_repo_head_hexsha: 0e19beb744778e579d310d8933b7e195ad7d8c58
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    from workflow.job import Job
    from workflow.status import Status
    from workflow.process import Process

    job = Job(id=1, name="test", process_list=["Process-A", "Process-B", "Process-C"])
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    job.start()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.put_on_hold()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.resume()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)
    job.update_job()
    print(job.processes[0].name, job.processes[0].status)
    print(job.processes[1].name, job.processes[1].status)
    print(job.processes[2].name, job.processes[2].status)

    print("********JSON Representation**********")
    job_string = job.to_json()
    print(job_string)
    print(type(job_string))

    print("********Parse JSON**********")
    job = Job.parse(job_string)
    print(type(job))
    print(job.to_json())

avg_line_length: 38.583333
max_line_length: 82
alphanum_fraction: 0.74514
qsc_code_num_words_quality_signal: 379
qsc_code_num_chars_quality_signal: 2315
qsc_code_mean_word_length_quality_signal: 4.506596
qsc_code_frac_words_unique_quality_signal: 0.079156
qsc_code_frac_chars_top_2grams_quality_signal: 0.449649
qsc_code_frac_chars_top_3grams_quality_signal: 0.318501
qsc_code_frac_chars_top_4grams_quality_signal: 0.269321
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.854215
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.829625
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.829625
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.829625
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.829625
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.829625
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.029505
qsc_code_frac_chars_whitespace_quality_signal: 0.04838
qsc_code_size_file_byte_quality_signal: 2315
qsc_code_num_lines_quality_signal: 59
qsc_code_num_chars_line_max_quality_signal: 83
qsc_code_num_chars_line_mean_quality_signal: 39.237288
qsc_code_frac_chars_alphabet_quality_signal: 0.745801
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.727273
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.041469
qsc_code_frac_chars_long_word_length_quality_signal: 0.010367
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.054545
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.054545
qsc_codepython_frac_lines_print_quality_signal: 0.690909
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 1
qsc_code_frac_chars_top_3grams: 1
qsc_code_frac_chars_top_4grams: 1
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 1
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 1
effective: 0
hits: 11
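
The derived line statistics in each record can be cross-checked against its content field. A small sketch, assuming (the dump itself does not define them) that avg_line_length is the mean length of the newline-split lines, max_line_length the length of the longest line, and alphanum_fraction the alphanumeric share of all characters:

    # Recompute three derived columns for a record's content string.
    # The exact definitions used by the dataset are assumptions here.
    def line_stats(content: str) -> tuple[float, int, float]:
        lines = content.split("\n")
        avg_line_length = sum(len(line) for line in lines) / len(lines)
        max_line_length = max(len(line) for line in lines)
        alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
        return avg_line_length, max_line_length, alphanum_fraction

    # For the example.py record above this should land near the stored
    # values (38.583333, 82, 0.74514), up to the exact line-splitting rule.
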

hexsha: cbbf29133a1b28c74ac870af01eab7cacc988061
size: 244
ext: py
lang: Python
max_stars_repo_path: django_rdkit/models/__init__.py
max_stars_repo_name: paconius/django-rdkit
max_stars_repo_head_hexsha: 9af9545afaad7884eecdf76ab8311af35f82ece4
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: django_rdkit/models/__init__.py
max_issues_repo_name: paconius/django-rdkit
max_issues_repo_head_hexsha: 9af9545afaad7884eecdf76ab8311af35f82ece4
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: django_rdkit/models/__init__.py
max_forks_repo_name: paconius/django-rdkit
max_forks_repo_head_hexsha: 9af9545afaad7884eecdf76ab8311af35f82ece4
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    from __future__ import unicode_literals

    from django.db.models import *

    # Chem aggregate functions
    # future -> from django_rdkit.models.aggregates import *
    from django_rdkit.models.fields import *
    from django_rdkit.models.functions import *

avg_line_length: 24.4
max_line_length: 57
alphanum_fraction: 0.807377
qsc_code_num_words_quality_signal: 32
qsc_code_num_chars_quality_signal: 244
qsc_code_mean_word_length_quality_signal: 5.90625
qsc_code_frac_words_unique_quality_signal: 0.4375
qsc_code_frac_chars_top_2grams_quality_signal: 0.21164
qsc_code_frac_chars_top_3grams_quality_signal: 0.238095
qsc_code_frac_chars_top_4grams_quality_signal: 0.333333
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.285714
qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0
qsc_code_frac_chars_whitespace_quality_signal: 0.127049
qsc_code_size_file_byte_quality_signal: 244
qsc_code_num_lines_quality_signal: 9
qsc_code_num_chars_line_max_quality_signal: 58
qsc_code_num_chars_line_mean_quality_signal: 27.111111
qsc_code_frac_chars_alphabet_quality_signal: 0.887324
qsc_code_frac_chars_comments_quality_signal: 0.32377
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0
qsc_codepython_cate_var_zero_quality_signal: true
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 1
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 1
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 1
qsc_code_frac_chars_top_3grams: 1
qsc_code_frac_chars_top_4grams: 1
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 1
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 1
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 1
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 1
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 7

hexsha: cbe18da20e86764183219654512b37a58335d84b
size: 2712
ext: py
lang: Python
max_stars_repo_path: problems/test_ic_10_bst_2nd_largest.py
max_stars_repo_name: gregdferrell/algo
max_stars_repo_head_hexsha: 974ae25b028d49bcb7ded6655a7e11dcf6aa221d
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: problems/test_ic_10_bst_2nd_largest.py
max_issues_repo_name: gregdferrell/algo
max_issues_repo_head_hexsha: 974ae25b028d49bcb7ded6655a7e11dcf6aa221d
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: problems/test_ic_10_bst_2nd_largest.py
max_forks_repo_name: gregdferrell/algo
max_forks_repo_head_hexsha: 974ae25b028d49bcb7ded6655a7e11dcf6aa221d
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    import pytest

    from .ic_10_bst_2nd_largest import find_second_largest_item_bst, find_second_largest_item_bst_no_parent
    from .problem_solve_util import BinaryTreeNode


    def test_find_second_largest_item_bst_not_enough_items():
        root_node = BinaryTreeNode(10, None)
        with pytest.raises(ValueError) as e:
            find_second_largest_item_bst(root_node)


    def test_find_second_largest_item_bst_traverse_right_no_left():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_right(11)
        new_node = new_node.insert_right(12)
        node_found = find_second_largest_item_bst(root_node)
        assert 11 == node_found.value


    def test_find_second_largest_item_bst_traverse_right_left_subtree():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_right(11)
        new_node = new_node.insert_right(12)
        new_node = new_node.insert_right(20)
        new_node = new_node.insert_left(15)
        new_node.insert_left(14)
        new_node = new_node.insert_right(16)
        new_node = new_node.insert_right(17)
        new_node = new_node.insert_right(18)
        new_node = new_node.insert_right(19)
        node_found = find_second_largest_item_bst(root_node)
        assert 19 == node_found.value


    def test_find_second_largest_item_bst_traverse_left_subtree():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_left(5)
        new_node = new_node.insert_right(6)
        node_found = find_second_largest_item_bst(root_node)
        assert 6 == node_found.value


    def test_find_second_largest_item_bst_no_parent_not_enough_items():
        root_node = BinaryTreeNode(10, None)
        with pytest.raises(ValueError) as e:
            find_second_largest_item_bst_no_parent(root_node)


    def test_find_second_largest_item_bst_no_parent_traverse_right_no_left():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_right(11)
        new_node = new_node.insert_right(12)
        node_found = find_second_largest_item_bst_no_parent(root_node)
        assert 11 == node_found.value


    def test_find_second_largest_item_bst_no_parent_traverse_right_left_subtree():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_right(11)
        new_node = new_node.insert_right(12)
        new_node = new_node.insert_right(20)
        new_node = new_node.insert_left(15)
        new_node.insert_left(14)
        new_node = new_node.insert_right(16)
        new_node = new_node.insert_right(17)
        new_node = new_node.insert_right(18)
        new_node = new_node.insert_right(19)
        node_found = find_second_largest_item_bst_no_parent(root_node)
        assert 19 == node_found.value


    def test_find_second_largest_item_bst_no_parent_traverse_left_subtree():
        root_node = BinaryTreeNode(10, None)
        new_node = root_node.insert_left(5)
        new_node = new_node.insert_right(6)
        node_found = find_second_largest_item_bst_no_parent(root_node)
        assert 6 == node_found.value

avg_line_length: 31.172414
max_line_length: 103
alphanum_fraction: 0.826327
qsc_code_num_words_quality_signal: 455
qsc_code_num_chars_quality_signal: 2712
qsc_code_mean_word_length_quality_signal: 4.406593
qsc_code_frac_words_unique_quality_signal: 0.10989
qsc_code_frac_chars_top_2grams_quality_signal: 0.153616
qsc_code_frac_chars_top_3grams_quality_signal: 0.149626
qsc_code_frac_chars_top_4grams_quality_signal: 0.188529
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.960599
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.948628
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.948628
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.909227
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.909227
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.881796
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.031429
qsc_code_frac_chars_whitespace_quality_signal: 0.096608
qsc_code_size_file_byte_quality_signal: 2712
qsc_code_num_lines_quality_signal: 86
qsc_code_num_chars_line_max_quality_signal: 104
qsc_code_num_chars_line_mean_quality_signal: 31.534884
qsc_code_frac_chars_alphabet_quality_signal: 0.786939
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.786885
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0.098361
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.131148
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.04918
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.180328
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 1
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 1
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 8

hexsha: 1dab2a4988002642e1d545d63b0548bbb2f9f6dc
size: 279
ext: py
lang: Python
max_stars_repo_path: photologue/tests/__init__.py
max_stars_repo_name: TAMUArch/django-photologue
max_stars_repo_head_hexsha: e153dd84715b1dd0bea3ac869cd9fcd9bf01e057
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: photologue/tests/__init__.py
max_issues_repo_name: TAMUArch/django-photologue
max_issues_repo_head_hexsha: e153dd84715b1dd0bea3ac869cd9fcd9bf01e057
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: photologue/tests/__init__.py
max_forks_repo_name: TAMUArch/django-photologue
max_forks_repo_head_hexsha: e153dd84715b1dd0bea3ac869cd9fcd9bf01e057
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    from photologue.tests.effect import *
    from photologue.tests.gallery import *
    from photologue.tests.photo import *
    from photologue.tests.resize import *
    from photologue.tests.views_photo import *
    from photologue.tests.views_gallery import *
    from photologue.tests.sitemap import *

avg_line_length: 34.875
max_line_length: 44
alphanum_fraction: 0.824373
qsc_code_num_words_quality_signal: 37
qsc_code_num_chars_quality_signal: 279
qsc_code_mean_word_length_quality_signal: 6.162162
qsc_code_frac_words_unique_quality_signal: 0.27027
qsc_code_frac_chars_top_2grams_quality_signal: 0.429825
qsc_code_frac_chars_top_3grams_quality_signal: 0.583333
qsc_code_frac_chars_top_4grams_quality_signal: 0.657895
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.697368
qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0
qsc_code_frac_chars_whitespace_quality_signal: 0.100358
qsc_code_size_file_byte_quality_signal: 279
qsc_code_num_lines_quality_signal: 7
qsc_code_num_chars_line_max_quality_signal: 45
qsc_code_num_chars_line_mean_quality_signal: 39.857143
qsc_code_frac_chars_alphabet_quality_signal: 0.908367
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0
qsc_codepython_cate_var_zero_quality_signal: true
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 1
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 1
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 1
qsc_code_frac_chars_top_3grams: 1
qsc_code_frac_chars_top_4grams: 1
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 1
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 1
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 1
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 1
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 7

hexsha: 1df9edaad04e189c96135ae3ba439d7970230d7b
size: 101
ext: py
lang: Python
max_stars_repo_path: getting_started_python/src/greetings/greetings.py
max_stars_repo_name: Tatskaari/please-codelabs
max_stars_repo_head_hexsha: d3bc47e988ef6dbf1e7fa19ddde45aa0b32ef1db
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2021-07-02T20:05:43.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-03T09:57:13.000Z
max_issues_repo_path: getting_started_python/src/greetings/greetings.py
max_issues_repo_name: Tatskaari/please-codelabs
max_issues_repo_head_hexsha: d3bc47e988ef6dbf1e7fa19ddde45aa0b32ef1db
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 10
max_issues_repo_issues_event_min_datetime: 2021-07-24T23:18:37.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-18T00:58:05.000Z
max_forks_repo_path: getting_started_python/src/greetings/greetings.py
max_forks_repo_name: ekmixon/please-codelabs
max_forks_repo_head_hexsha: 4eb03ed56d870b87199d3a22d047dea3cd673900
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-03-13T18:49:15.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-02T20:05:46.000Z
content:

    from numpy import random

    def greeting():
        return random.choice(["Hello", "Bonjour", "Marhabaan"])

avg_line_length: 25.25
max_line_length: 59
alphanum_fraction: 0.70297
qsc_code_num_words_quality_signal: 12
qsc_code_num_chars_quality_signal: 101
qsc_code_mean_word_length_quality_signal: 5.916667
qsc_code_frac_words_unique_quality_signal: 0.916667
qsc_code_frac_chars_top_2grams_quality_signal: 0
qsc_code_frac_chars_top_3grams_quality_signal: 0
qsc_code_frac_chars_top_4grams_quality_signal: 0
qsc_code_frac_chars_dupe_5grams_quality_signal: 0
qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0
qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0
qsc_code_frac_chars_whitespace_quality_signal: 0.138614
qsc_code_size_file_byte_quality_signal: 101
qsc_code_num_lines_quality_signal: 4
qsc_code_num_chars_line_max_quality_signal: 59
qsc_code_num_chars_line_mean_quality_signal: 25.25
qsc_code_frac_chars_alphabet_quality_signal: 0.816092
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.205882
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.333333
qsc_codepython_cate_var_zero_quality_signal: true
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.333333
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.333333
qsc_codepython_score_lines_no_logic_quality_signal: 1
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 1
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 0
qsc_code_frac_chars_dupe_6grams: 0
qsc_code_frac_chars_dupe_7grams: 0
qsc_code_frac_chars_dupe_8grams: 0
qsc_code_frac_chars_dupe_9grams: 0
qsc_code_frac_chars_dupe_10grams: 0
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 1
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 0
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 1
qsc_codepython_cate_var_zero: 1
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 1
qsc_codepython_frac_lines_simplefunc: 1
qsc_codepython_score_lines_no_logic: 1
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 7

hexsha: 3816121331bf0614816e5725fb91b867c274486b
size: 89277
ext: py
lang: Python
max_stars_repo_path: sdk/python/pulumi_aiven/service_integration.py
max_stars_repo_name: pulumi/pulumi-aiven
max_stars_repo_head_hexsha: 0d330ef43c17ce2d2a77588c1d9754de6c8ca736
max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2019-11-28T22:30:11.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-27T16:40:54.000Z
max_issues_repo_path: sdk/python/pulumi_aiven/service_integration.py
max_issues_repo_name: pulumi/pulumi-aiven
max_issues_repo_head_hexsha: 0d330ef43c17ce2d2a77588c1d9754de6c8ca736
max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_issues_count: 97
max_issues_repo_issues_event_min_datetime: 2019-12-17T09:58:57.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T15:19:02.000Z
max_forks_repo_path: sdk/python/pulumi_aiven/service_integration.py
max_forks_repo_name: pulumi/pulumi-aiven
max_forks_repo_head_hexsha: 0d330ef43c17ce2d2a77588c1d9754de6c8ca736
max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-11-24T12:22:38.000Z
max_forks_repo_forks_event_max_datetime: 2020-11-24T12:22:38.000Z
content:
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['ServiceIntegrationArgs', 'ServiceIntegration'] @pulumi.input_type class ServiceIntegrationArgs: def __init__(__self__, *, integration_type: pulumi.Input[str], project: pulumi.Input[str], dashboard_user_config: Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']] = None, datadog_user_config: Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']] = None, destination_endpoint_id: Optional[pulumi.Input[str]] = None, destination_service_name: Optional[pulumi.Input[str]] = None, external_aws_cloudwatch_logs_user_config: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']] = None, external_aws_cloudwatch_metrics_user_config: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']] = None, external_elasticsearch_logs_user_config: Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']] = None, external_google_cloud_logging_user_config: Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']] = None, kafka_connect_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']] = None, kafka_logs_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']] = None, kafka_mirrormaker_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']] = None, logs_user_config: Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']] = None, m3aggregator_user_config: Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']] = None, m3coordinator_user_config: Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']] = None, metrics_user_config: Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']] = None, mirrormaker_user_config: Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']] = None, prometheus_user_config: Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']] = None, read_replica_user_config: Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']] = None, rsyslog_user_config: Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']] = None, schema_registry_proxy_user_config: Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']] = None, signalfx_user_config: Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']] = None, source_endpoint_id: Optional[pulumi.Input[str]] = None, source_service_name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a ServiceIntegration resource. :param pulumi.Input[str] integration_type: identifies the type of integration that is set up. Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. :param pulumi.Input[str] project: defines the project the integration belongs to. 
:param pulumi.Input['ServiceIntegrationDashboardUserConfigArgs'] dashboard_user_config: Dashboard specific user configurable settings :param pulumi.Input['ServiceIntegrationDatadogUserConfigArgs'] datadog_user_config: Dashboard specific user configurable settings :param pulumi.Input[str] destination_endpoint_id: or `destination_service_name` - (Required) identifies the target side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] destination_service_name: Destination service for the integration (if any) :param pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs'] external_aws_cloudwatch_logs_user_config: External AWS Cloudwatch logs specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs'] external_aws_cloudwatch_metrics_user_config: External AWS cloudwatch metrics specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs'] external_elasticsearch_logs_user_config: External Elasticsearch logs specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs'] external_google_cloud_logging_user_config: External Google Cloud Logging specific user configurable settings :param pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs'] kafka_connect_user_config: Kafka Connect specific user configurable settings :param pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs'] kafka_logs_user_config: Kafka Logs specific user configurable settings :param pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs'] kafka_mirrormaker_user_config: Mirrormaker 2 integration specific user configurable settings :param pulumi.Input['ServiceIntegrationLogsUserConfigArgs'] logs_user_config: Log integration specific user configurable settings :param pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs'] m3aggregator_user_config: M3 aggregator specific user configurable settings :param pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs'] m3coordinator_user_config: M3 coordinator specific user configurable settings :param pulumi.Input['ServiceIntegrationMetricsUserConfigArgs'] metrics_user_config: Metrics specific user configurable settings :param pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs'] mirrormaker_user_config: Mirrormaker 1 integration specific user configurable settings :param pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs'] prometheus_user_config: Prometheus coordinator specific user configurable settings :param pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs'] read_replica_user_config: PG Read replica specific user configurable settings :param pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs'] rsyslog_user_config: RSyslog specific user configurable settings :param pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs'] schema_registry_proxy_user_config: Schema registry proxy specific user configurable settings :param pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs'] signalfx_user_config: Signalfx specific user configurable settings :param pulumi.Input[str] source_endpoint_id: or `source_service_name` - (Optional) identifies the source side of the integration. 
Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] source_service_name: Source service for the integration (if any) """ pulumi.set(__self__, "integration_type", integration_type) pulumi.set(__self__, "project", project) if dashboard_user_config is not None: pulumi.set(__self__, "dashboard_user_config", dashboard_user_config) if datadog_user_config is not None: pulumi.set(__self__, "datadog_user_config", datadog_user_config) if destination_endpoint_id is not None: pulumi.set(__self__, "destination_endpoint_id", destination_endpoint_id) if destination_service_name is not None: pulumi.set(__self__, "destination_service_name", destination_service_name) if external_aws_cloudwatch_logs_user_config is not None: pulumi.set(__self__, "external_aws_cloudwatch_logs_user_config", external_aws_cloudwatch_logs_user_config) if external_aws_cloudwatch_metrics_user_config is not None: pulumi.set(__self__, "external_aws_cloudwatch_metrics_user_config", external_aws_cloudwatch_metrics_user_config) if external_elasticsearch_logs_user_config is not None: pulumi.set(__self__, "external_elasticsearch_logs_user_config", external_elasticsearch_logs_user_config) if external_google_cloud_logging_user_config is not None: pulumi.set(__self__, "external_google_cloud_logging_user_config", external_google_cloud_logging_user_config) if kafka_connect_user_config is not None: pulumi.set(__self__, "kafka_connect_user_config", kafka_connect_user_config) if kafka_logs_user_config is not None: pulumi.set(__self__, "kafka_logs_user_config", kafka_logs_user_config) if kafka_mirrormaker_user_config is not None: pulumi.set(__self__, "kafka_mirrormaker_user_config", kafka_mirrormaker_user_config) if logs_user_config is not None: pulumi.set(__self__, "logs_user_config", logs_user_config) if m3aggregator_user_config is not None: pulumi.set(__self__, "m3aggregator_user_config", m3aggregator_user_config) if m3coordinator_user_config is not None: pulumi.set(__self__, "m3coordinator_user_config", m3coordinator_user_config) if metrics_user_config is not None: pulumi.set(__self__, "metrics_user_config", metrics_user_config) if mirrormaker_user_config is not None: pulumi.set(__self__, "mirrormaker_user_config", mirrormaker_user_config) if prometheus_user_config is not None: pulumi.set(__self__, "prometheus_user_config", prometheus_user_config) if read_replica_user_config is not None: pulumi.set(__self__, "read_replica_user_config", read_replica_user_config) if rsyslog_user_config is not None: pulumi.set(__self__, "rsyslog_user_config", rsyslog_user_config) if schema_registry_proxy_user_config is not None: pulumi.set(__self__, "schema_registry_proxy_user_config", schema_registry_proxy_user_config) if signalfx_user_config is not None: pulumi.set(__self__, "signalfx_user_config", signalfx_user_config) if source_endpoint_id is not None: pulumi.set(__self__, "source_endpoint_id", source_endpoint_id) if source_service_name is not None: pulumi.set(__self__, "source_service_name", source_service_name) @property @pulumi.getter(name="integrationType") def integration_type(self) -> pulumi.Input[str]: """ identifies the type of integration that is set up. 
Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. """ return pulumi.get(self, "integration_type") @integration_type.setter def integration_type(self, value: pulumi.Input[str]): pulumi.set(self, "integration_type", value) @property @pulumi.getter def project(self) -> pulumi.Input[str]: """ defines the project the integration belongs to. """ return pulumi.get(self, "project") @project.setter def project(self, value: pulumi.Input[str]): pulumi.set(self, "project", value) @property @pulumi.getter(name="dashboardUserConfig") def dashboard_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']]: """ Dashboard specific user configurable settings """ return pulumi.get(self, "dashboard_user_config") @dashboard_user_config.setter def dashboard_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']]): pulumi.set(self, "dashboard_user_config", value) @property @pulumi.getter(name="datadogUserConfig") def datadog_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']]: """ Dashboard specific user configurable settings """ return pulumi.get(self, "datadog_user_config") @datadog_user_config.setter def datadog_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']]): pulumi.set(self, "datadog_user_config", value) @property @pulumi.getter(name="destinationEndpointId") def destination_endpoint_id(self) -> Optional[pulumi.Input[str]]: """ or `destination_service_name` - (Required) identifies the target side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. 
""" return pulumi.get(self, "destination_endpoint_id") @destination_endpoint_id.setter def destination_endpoint_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_endpoint_id", value) @property @pulumi.getter(name="destinationServiceName") def destination_service_name(self) -> Optional[pulumi.Input[str]]: """ Destination service for the integration (if any) """ return pulumi.get(self, "destination_service_name") @destination_service_name.setter def destination_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_service_name", value) @property @pulumi.getter(name="externalAwsCloudwatchLogsUserConfig") def external_aws_cloudwatch_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]: """ External AWS Cloudwatch logs specific user configurable settings """ return pulumi.get(self, "external_aws_cloudwatch_logs_user_config") @external_aws_cloudwatch_logs_user_config.setter def external_aws_cloudwatch_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]): pulumi.set(self, "external_aws_cloudwatch_logs_user_config", value) @property @pulumi.getter(name="externalAwsCloudwatchMetricsUserConfig") def external_aws_cloudwatch_metrics_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]: """ External AWS cloudwatch metrics specific user configurable settings """ return pulumi.get(self, "external_aws_cloudwatch_metrics_user_config") @external_aws_cloudwatch_metrics_user_config.setter def external_aws_cloudwatch_metrics_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]): pulumi.set(self, "external_aws_cloudwatch_metrics_user_config", value) @property @pulumi.getter(name="externalElasticsearchLogsUserConfig") def external_elasticsearch_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]: """ External Elasticsearch logs specific user configurable settings """ return pulumi.get(self, "external_elasticsearch_logs_user_config") @external_elasticsearch_logs_user_config.setter def external_elasticsearch_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]): pulumi.set(self, "external_elasticsearch_logs_user_config", value) @property @pulumi.getter(name="externalGoogleCloudLoggingUserConfig") def external_google_cloud_logging_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]: """ External Google Cloud Logging specific user configurable settings """ return pulumi.get(self, "external_google_cloud_logging_user_config") @external_google_cloud_logging_user_config.setter def external_google_cloud_logging_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]): pulumi.set(self, "external_google_cloud_logging_user_config", value) @property @pulumi.getter(name="kafkaConnectUserConfig") def kafka_connect_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']]: """ Kafka Connect specific user configurable settings """ return pulumi.get(self, "kafka_connect_user_config") @kafka_connect_user_config.setter def kafka_connect_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']]): pulumi.set(self, "kafka_connect_user_config", value) 
@property @pulumi.getter(name="kafkaLogsUserConfig") def kafka_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']]: """ Kafka Logs specific user configurable settings """ return pulumi.get(self, "kafka_logs_user_config") @kafka_logs_user_config.setter def kafka_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']]): pulumi.set(self, "kafka_logs_user_config", value) @property @pulumi.getter(name="kafkaMirrormakerUserConfig") def kafka_mirrormaker_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]: """ Mirrormaker 2 integration specific user configurable settings """ return pulumi.get(self, "kafka_mirrormaker_user_config") @kafka_mirrormaker_user_config.setter def kafka_mirrormaker_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]): pulumi.set(self, "kafka_mirrormaker_user_config", value) @property @pulumi.getter(name="logsUserConfig") def logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']]: """ Log integration specific user configurable settings """ return pulumi.get(self, "logs_user_config") @logs_user_config.setter def logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']]): pulumi.set(self, "logs_user_config", value) @property @pulumi.getter(name="m3aggregatorUserConfig") def m3aggregator_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']]: """ M3 aggregator specific user configurable settings """ return pulumi.get(self, "m3aggregator_user_config") @m3aggregator_user_config.setter def m3aggregator_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']]): pulumi.set(self, "m3aggregator_user_config", value) @property @pulumi.getter(name="m3coordinatorUserConfig") def m3coordinator_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']]: """ M3 coordinator specific user configurable settings """ return pulumi.get(self, "m3coordinator_user_config") @m3coordinator_user_config.setter def m3coordinator_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']]): pulumi.set(self, "m3coordinator_user_config", value) @property @pulumi.getter(name="metricsUserConfig") def metrics_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']]: """ Metrics specific user configurable settings """ return pulumi.get(self, "metrics_user_config") @metrics_user_config.setter def metrics_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']]): pulumi.set(self, "metrics_user_config", value) @property @pulumi.getter(name="mirrormakerUserConfig") def mirrormaker_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']]: """ Mirrormaker 1 integration specific user configurable settings """ return pulumi.get(self, "mirrormaker_user_config") @mirrormaker_user_config.setter def mirrormaker_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']]): pulumi.set(self, "mirrormaker_user_config", value) @property @pulumi.getter(name="prometheusUserConfig") def prometheus_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']]: """ Prometheus coordinator specific user configurable settings """ return pulumi.get(self, "prometheus_user_config") 
@prometheus_user_config.setter def prometheus_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']]): pulumi.set(self, "prometheus_user_config", value) @property @pulumi.getter(name="readReplicaUserConfig") def read_replica_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']]: """ PG Read replica specific user configurable settings """ return pulumi.get(self, "read_replica_user_config") @read_replica_user_config.setter def read_replica_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']]): pulumi.set(self, "read_replica_user_config", value) @property @pulumi.getter(name="rsyslogUserConfig") def rsyslog_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']]: """ RSyslog specific user configurable settings """ return pulumi.get(self, "rsyslog_user_config") @rsyslog_user_config.setter def rsyslog_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']]): pulumi.set(self, "rsyslog_user_config", value) @property @pulumi.getter(name="schemaRegistryProxyUserConfig") def schema_registry_proxy_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]: """ Schema registry proxy specific user configurable settings """ return pulumi.get(self, "schema_registry_proxy_user_config") @schema_registry_proxy_user_config.setter def schema_registry_proxy_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]): pulumi.set(self, "schema_registry_proxy_user_config", value) @property @pulumi.getter(name="signalfxUserConfig") def signalfx_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']]: """ Signalfx specific user configurable settings """ return pulumi.get(self, "signalfx_user_config") @signalfx_user_config.setter def signalfx_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']]): pulumi.set(self, "signalfx_user_config", value) @property @pulumi.getter(name="sourceEndpointId") def source_endpoint_id(self) -> Optional[pulumi.Input[str]]: """ or `source_service_name` - (Optional) identifies the source side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. 
""" return pulumi.get(self, "source_endpoint_id") @source_endpoint_id.setter def source_endpoint_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_endpoint_id", value) @property @pulumi.getter(name="sourceServiceName") def source_service_name(self) -> Optional[pulumi.Input[str]]: """ Source service for the integration (if any) """ return pulumi.get(self, "source_service_name") @source_service_name.setter def source_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_service_name", value) @pulumi.input_type class _ServiceIntegrationState: def __init__(__self__, *, dashboard_user_config: Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']] = None, datadog_user_config: Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']] = None, destination_endpoint_id: Optional[pulumi.Input[str]] = None, destination_service_name: Optional[pulumi.Input[str]] = None, external_aws_cloudwatch_logs_user_config: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']] = None, external_aws_cloudwatch_metrics_user_config: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']] = None, external_elasticsearch_logs_user_config: Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']] = None, external_google_cloud_logging_user_config: Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']] = None, integration_type: Optional[pulumi.Input[str]] = None, kafka_connect_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']] = None, kafka_logs_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']] = None, kafka_mirrormaker_user_config: Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']] = None, logs_user_config: Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']] = None, m3aggregator_user_config: Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']] = None, m3coordinator_user_config: Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']] = None, metrics_user_config: Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']] = None, mirrormaker_user_config: Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']] = None, project: Optional[pulumi.Input[str]] = None, prometheus_user_config: Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']] = None, read_replica_user_config: Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']] = None, rsyslog_user_config: Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']] = None, schema_registry_proxy_user_config: Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']] = None, signalfx_user_config: Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']] = None, source_endpoint_id: Optional[pulumi.Input[str]] = None, source_service_name: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering ServiceIntegration resources. :param pulumi.Input['ServiceIntegrationDashboardUserConfigArgs'] dashboard_user_config: Dashboard specific user configurable settings :param pulumi.Input['ServiceIntegrationDatadogUserConfigArgs'] datadog_user_config: Dashboard specific user configurable settings :param pulumi.Input[str] destination_endpoint_id: or `destination_service_name` - (Required) identifies the target side of the integration. 
Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] destination_service_name: Destination service for the integration (if any) :param pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs'] external_aws_cloudwatch_logs_user_config: External AWS Cloudwatch logs specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs'] external_aws_cloudwatch_metrics_user_config: External AWS cloudwatch metrics specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs'] external_elasticsearch_logs_user_config: External Elasticsearch logs specific user configurable settings :param pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs'] external_google_cloud_logging_user_config: External Google Cloud Logging specific user configurable settings :param pulumi.Input[str] integration_type: identifies the type of integration that is set up. Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. :param pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs'] kafka_connect_user_config: Kafka Connect specific user configurable settings :param pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs'] kafka_logs_user_config: Kafka Logs specific user configurable settings :param pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs'] kafka_mirrormaker_user_config: Mirrormaker 2 integration specific user configurable settings :param pulumi.Input['ServiceIntegrationLogsUserConfigArgs'] logs_user_config: Log integration specific user configurable settings :param pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs'] m3aggregator_user_config: M3 aggregator specific user configurable settings :param pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs'] m3coordinator_user_config: M3 coordinator specific user configurable settings :param pulumi.Input['ServiceIntegrationMetricsUserConfigArgs'] metrics_user_config: Metrics specific user configurable settings :param pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs'] mirrormaker_user_config: Mirrormaker 1 integration specific user configurable settings :param pulumi.Input[str] project: defines the project the integration belongs to. 
:param pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs'] prometheus_user_config: Prometheus coordinator specific user configurable settings :param pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs'] read_replica_user_config: PG Read replica specific user configurable settings :param pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs'] rsyslog_user_config: RSyslog specific user configurable settings :param pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs'] schema_registry_proxy_user_config: Schema registry proxy specific user configurable settings :param pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs'] signalfx_user_config: Signalfx specific user configurable settings :param pulumi.Input[str] source_endpoint_id: or `source_service_name` - (Optional) identifies the source side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] source_service_name: Source service for the integration (if any) """ if dashboard_user_config is not None: pulumi.set(__self__, "dashboard_user_config", dashboard_user_config) if datadog_user_config is not None: pulumi.set(__self__, "datadog_user_config", datadog_user_config) if destination_endpoint_id is not None: pulumi.set(__self__, "destination_endpoint_id", destination_endpoint_id) if destination_service_name is not None: pulumi.set(__self__, "destination_service_name", destination_service_name) if external_aws_cloudwatch_logs_user_config is not None: pulumi.set(__self__, "external_aws_cloudwatch_logs_user_config", external_aws_cloudwatch_logs_user_config) if external_aws_cloudwatch_metrics_user_config is not None: pulumi.set(__self__, "external_aws_cloudwatch_metrics_user_config", external_aws_cloudwatch_metrics_user_config) if external_elasticsearch_logs_user_config is not None: pulumi.set(__self__, "external_elasticsearch_logs_user_config", external_elasticsearch_logs_user_config) if external_google_cloud_logging_user_config is not None: pulumi.set(__self__, "external_google_cloud_logging_user_config", external_google_cloud_logging_user_config) if integration_type is not None: pulumi.set(__self__, "integration_type", integration_type) if kafka_connect_user_config is not None: pulumi.set(__self__, "kafka_connect_user_config", kafka_connect_user_config) if kafka_logs_user_config is not None: pulumi.set(__self__, "kafka_logs_user_config", kafka_logs_user_config) if kafka_mirrormaker_user_config is not None: pulumi.set(__self__, "kafka_mirrormaker_user_config", kafka_mirrormaker_user_config) if logs_user_config is not None: pulumi.set(__self__, "logs_user_config", logs_user_config) if m3aggregator_user_config is not None: pulumi.set(__self__, "m3aggregator_user_config", m3aggregator_user_config) if m3coordinator_user_config is not None: pulumi.set(__self__, "m3coordinator_user_config", m3coordinator_user_config) if metrics_user_config is not None: pulumi.set(__self__, "metrics_user_config", metrics_user_config) if mirrormaker_user_config is not None: pulumi.set(__self__, "mirrormaker_user_config", mirrormaker_user_config) if project is not None: pulumi.set(__self__, "project", project) if prometheus_user_config is not None: pulumi.set(__self__, "prometheus_user_config", prometheus_user_config) if read_replica_user_config is not None: 
pulumi.set(__self__, "read_replica_user_config", read_replica_user_config) if rsyslog_user_config is not None: pulumi.set(__self__, "rsyslog_user_config", rsyslog_user_config) if schema_registry_proxy_user_config is not None: pulumi.set(__self__, "schema_registry_proxy_user_config", schema_registry_proxy_user_config) if signalfx_user_config is not None: pulumi.set(__self__, "signalfx_user_config", signalfx_user_config) if source_endpoint_id is not None: pulumi.set(__self__, "source_endpoint_id", source_endpoint_id) if source_service_name is not None: pulumi.set(__self__, "source_service_name", source_service_name) @property @pulumi.getter(name="dashboardUserConfig") def dashboard_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']]: """ Dashboard specific user configurable settings """ return pulumi.get(self, "dashboard_user_config") @dashboard_user_config.setter def dashboard_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationDashboardUserConfigArgs']]): pulumi.set(self, "dashboard_user_config", value) @property @pulumi.getter(name="datadogUserConfig") def datadog_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']]: """ Dashboard specific user configurable settings """ return pulumi.get(self, "datadog_user_config") @datadog_user_config.setter def datadog_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationDatadogUserConfigArgs']]): pulumi.set(self, "datadog_user_config", value) @property @pulumi.getter(name="destinationEndpointId") def destination_endpoint_id(self) -> Optional[pulumi.Input[str]]: """ or `destination_service_name` - (Required) identifies the target side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. 
""" return pulumi.get(self, "destination_endpoint_id") @destination_endpoint_id.setter def destination_endpoint_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_endpoint_id", value) @property @pulumi.getter(name="destinationServiceName") def destination_service_name(self) -> Optional[pulumi.Input[str]]: """ Destination service for the integration (if any) """ return pulumi.get(self, "destination_service_name") @destination_service_name.setter def destination_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_service_name", value) @property @pulumi.getter(name="externalAwsCloudwatchLogsUserConfig") def external_aws_cloudwatch_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]: """ External AWS Cloudwatch logs specific user configurable settings """ return pulumi.get(self, "external_aws_cloudwatch_logs_user_config") @external_aws_cloudwatch_logs_user_config.setter def external_aws_cloudwatch_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]): pulumi.set(self, "external_aws_cloudwatch_logs_user_config", value) @property @pulumi.getter(name="externalAwsCloudwatchMetricsUserConfig") def external_aws_cloudwatch_metrics_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]: """ External AWS cloudwatch metrics specific user configurable settings """ return pulumi.get(self, "external_aws_cloudwatch_metrics_user_config") @external_aws_cloudwatch_metrics_user_config.setter def external_aws_cloudwatch_metrics_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]): pulumi.set(self, "external_aws_cloudwatch_metrics_user_config", value) @property @pulumi.getter(name="externalElasticsearchLogsUserConfig") def external_elasticsearch_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]: """ External Elasticsearch logs specific user configurable settings """ return pulumi.get(self, "external_elasticsearch_logs_user_config") @external_elasticsearch_logs_user_config.setter def external_elasticsearch_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]): pulumi.set(self, "external_elasticsearch_logs_user_config", value) @property @pulumi.getter(name="externalGoogleCloudLoggingUserConfig") def external_google_cloud_logging_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]: """ External Google Cloud Logging specific user configurable settings """ return pulumi.get(self, "external_google_cloud_logging_user_config") @external_google_cloud_logging_user_config.setter def external_google_cloud_logging_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]): pulumi.set(self, "external_google_cloud_logging_user_config", value) @property @pulumi.getter(name="integrationType") def integration_type(self) -> Optional[pulumi.Input[str]]: """ identifies the type of integration that is set up. 
Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. """ return pulumi.get(self, "integration_type") @integration_type.setter def integration_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "integration_type", value) @property @pulumi.getter(name="kafkaConnectUserConfig") def kafka_connect_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']]: """ Kafka Connect specific user configurable settings """ return pulumi.get(self, "kafka_connect_user_config") @kafka_connect_user_config.setter def kafka_connect_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaConnectUserConfigArgs']]): pulumi.set(self, "kafka_connect_user_config", value) @property @pulumi.getter(name="kafkaLogsUserConfig") def kafka_logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']]: """ Kafka Logs specific user configurable settings """ return pulumi.get(self, "kafka_logs_user_config") @kafka_logs_user_config.setter def kafka_logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaLogsUserConfigArgs']]): pulumi.set(self, "kafka_logs_user_config", value) @property @pulumi.getter(name="kafkaMirrormakerUserConfig") def kafka_mirrormaker_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]: """ Mirrormaker 2 integration specific user configurable settings """ return pulumi.get(self, "kafka_mirrormaker_user_config") @kafka_mirrormaker_user_config.setter def kafka_mirrormaker_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]): pulumi.set(self, "kafka_mirrormaker_user_config", value) @property @pulumi.getter(name="logsUserConfig") def logs_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']]: """ Log integration specific user configurable settings """ return pulumi.get(self, "logs_user_config") @logs_user_config.setter def logs_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationLogsUserConfigArgs']]): pulumi.set(self, "logs_user_config", value) @property @pulumi.getter(name="m3aggregatorUserConfig") def m3aggregator_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']]: """ M3 aggregator specific user configurable settings """ return pulumi.get(self, "m3aggregator_user_config") @m3aggregator_user_config.setter def m3aggregator_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationM3aggregatorUserConfigArgs']]): pulumi.set(self, "m3aggregator_user_config", value) @property @pulumi.getter(name="m3coordinatorUserConfig") def m3coordinator_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']]: """ M3 coordinator specific user configurable settings """ return pulumi.get(self, "m3coordinator_user_config") @m3coordinator_user_config.setter def m3coordinator_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationM3coordinatorUserConfigArgs']]): pulumi.set(self, "m3coordinator_user_config", value) @property @pulumi.getter(name="metricsUserConfig") def metrics_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']]: """ Metrics specific user configurable settings """ return 
pulumi.get(self, "metrics_user_config") @metrics_user_config.setter def metrics_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationMetricsUserConfigArgs']]): pulumi.set(self, "metrics_user_config", value) @property @pulumi.getter(name="mirrormakerUserConfig") def mirrormaker_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']]: """ Mirrormaker 1 integration specific user configurable settings """ return pulumi.get(self, "mirrormaker_user_config") @mirrormaker_user_config.setter def mirrormaker_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationMirrormakerUserConfigArgs']]): pulumi.set(self, "mirrormaker_user_config", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ defines the project the integration belongs to. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="prometheusUserConfig") def prometheus_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']]: """ Prometheus coordinator specific user configurable settings """ return pulumi.get(self, "prometheus_user_config") @prometheus_user_config.setter def prometheus_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationPrometheusUserConfigArgs']]): pulumi.set(self, "prometheus_user_config", value) @property @pulumi.getter(name="readReplicaUserConfig") def read_replica_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']]: """ PG Read replica specific user configurable settings """ return pulumi.get(self, "read_replica_user_config") @read_replica_user_config.setter def read_replica_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationReadReplicaUserConfigArgs']]): pulumi.set(self, "read_replica_user_config", value) @property @pulumi.getter(name="rsyslogUserConfig") def rsyslog_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']]: """ RSyslog specific user configurable settings """ return pulumi.get(self, "rsyslog_user_config") @rsyslog_user_config.setter def rsyslog_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationRsyslogUserConfigArgs']]): pulumi.set(self, "rsyslog_user_config", value) @property @pulumi.getter(name="schemaRegistryProxyUserConfig") def schema_registry_proxy_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]: """ Schema registry proxy specific user configurable settings """ return pulumi.get(self, "schema_registry_proxy_user_config") @schema_registry_proxy_user_config.setter def schema_registry_proxy_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]): pulumi.set(self, "schema_registry_proxy_user_config", value) @property @pulumi.getter(name="signalfxUserConfig") def signalfx_user_config(self) -> Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']]: """ Signalfx specific user configurable settings """ return pulumi.get(self, "signalfx_user_config") @signalfx_user_config.setter def signalfx_user_config(self, value: Optional[pulumi.Input['ServiceIntegrationSignalfxUserConfigArgs']]): pulumi.set(self, "signalfx_user_config", value) @property @pulumi.getter(name="sourceEndpointId") def source_endpoint_id(self) -> Optional[pulumi.Input[str]]: """ or `source_service_name` - (Optional) identifies the source side of the integration. 
Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. """ return pulumi.get(self, "source_endpoint_id") @source_endpoint_id.setter def source_endpoint_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_endpoint_id", value) @property @pulumi.getter(name="sourceServiceName") def source_service_name(self) -> Optional[pulumi.Input[str]]: """ Source service for the integration (if any) """ return pulumi.get(self, "source_service_name") @source_service_name.setter def source_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_service_name", value) class ServiceIntegration(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, dashboard_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDashboardUserConfigArgs']]] = None, datadog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDatadogUserConfigArgs']]] = None, destination_endpoint_id: Optional[pulumi.Input[str]] = None, destination_service_name: Optional[pulumi.Input[str]] = None, external_aws_cloudwatch_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]] = None, external_aws_cloudwatch_metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]] = None, external_elasticsearch_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]] = None, external_google_cloud_logging_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]] = None, integration_type: Optional[pulumi.Input[str]] = None, kafka_connect_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaConnectUserConfigArgs']]] = None, kafka_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaLogsUserConfigArgs']]] = None, kafka_mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]] = None, logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationLogsUserConfigArgs']]] = None, m3aggregator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3aggregatorUserConfigArgs']]] = None, m3coordinator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3coordinatorUserConfigArgs']]] = None, metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMetricsUserConfigArgs']]] = None, mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMirrormakerUserConfigArgs']]] = None, project: Optional[pulumi.Input[str]] = None, prometheus_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationPrometheusUserConfigArgs']]] = None, read_replica_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationReadReplicaUserConfigArgs']]] = None, rsyslog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationRsyslogUserConfigArgs']]] = None, schema_registry_proxy_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]] = None, signalfx_user_config: 
Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSignalfxUserConfigArgs']]] = None, source_endpoint_id: Optional[pulumi.Input[str]] = None, source_service_name: Optional[pulumi.Input[str]] = None, __props__=None): """ ## # Service Integration Resource The Service Integration resource allows the creation and management of Aiven Service Integrations. Service Integration defines an integration between two Aiven services or between Aiven service and an external integration endpoint. Integration could be for example sending metrics from Kafka service to an InfluxDB service, getting metrics from an InfluxDB service to a Grafana service to show dashboards, sending logs from any service to Elasticsearch, etc. ## Example Usage ```python import pulumi import pulumi_aiven as aiven myintegration = aiven.ServiceIntegration("myintegration", project=aiven_project["myproject"]["project"], destination_endpoint_id=aiven_service_integration_endpoint["XX"]["id"], integration_type="datadog", source_service_name=aiven_kafka["XXX"]["service_name"]) ``` > **Note** For services running on `hobbiest` plan service integrations are not supported. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['ServiceIntegrationDashboardUserConfigArgs']] dashboard_user_config: Dashboard specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationDatadogUserConfigArgs']] datadog_user_config: Dashboard specific user configurable settings :param pulumi.Input[str] destination_endpoint_id: or `destination_service_name` - (Required) identifies the target side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] destination_service_name: Destination service for the integration (if any) :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']] external_aws_cloudwatch_logs_user_config: External AWS Cloudwatch logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']] external_aws_cloudwatch_metrics_user_config: External AWS cloudwatch metrics specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']] external_elasticsearch_logs_user_config: External Elasticsearch logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']] external_google_cloud_logging_user_config: External Google Cloud Logging specific user configurable settings :param pulumi.Input[str] integration_type: identifies the type of integration that is set up. Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. 
:param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaConnectUserConfigArgs']] kafka_connect_user_config: Kafka Connect specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaLogsUserConfigArgs']] kafka_logs_user_config: Kafka Logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaMirrormakerUserConfigArgs']] kafka_mirrormaker_user_config: Mirrormaker 2 integration specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationLogsUserConfigArgs']] logs_user_config: Log integration specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationM3aggregatorUserConfigArgs']] m3aggregator_user_config: M3 aggregator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationM3coordinatorUserConfigArgs']] m3coordinator_user_config: M3 coordinator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationMetricsUserConfigArgs']] metrics_user_config: Metrics specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationMirrormakerUserConfigArgs']] mirrormaker_user_config: Mirrormaker 1 integration specific user configurable settings :param pulumi.Input[str] project: defines the project the integration belongs to. :param pulumi.Input[pulumi.InputType['ServiceIntegrationPrometheusUserConfigArgs']] prometheus_user_config: Prometheus coordinator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationReadReplicaUserConfigArgs']] read_replica_user_config: PG Read replica specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationRsyslogUserConfigArgs']] rsyslog_user_config: RSyslog specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']] schema_registry_proxy_user_config: Schema registry proxy specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationSignalfxUserConfigArgs']] signalfx_user_config: Signalfx specific user configurable settings :param pulumi.Input[str] source_endpoint_id: or `source_service_name` - (Optional) identifies the source side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] source_service_name: Source service for the integration (if any) """ ... @overload def __init__(__self__, resource_name: str, args: ServiceIntegrationArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## # Service Integration Resource The Service Integration resource allows the creation and management of Aiven Service Integrations. Service Integration defines an integration between two Aiven services or between Aiven service and an external integration endpoint. Integration could be for example sending metrics from Kafka service to an InfluxDB service, getting metrics from an InfluxDB service to a Grafana service to show dashboards, sending logs from any service to Elasticsearch, etc. 
## Example Usage ```python import pulumi import pulumi_aiven as aiven myintegration = aiven.ServiceIntegration("myintegration", project=aiven_project["myproject"]["project"], destination_endpoint_id=aiven_service_integration_endpoint["XX"]["id"], integration_type="datadog", source_service_name=aiven_kafka["XXX"]["service_name"]) ``` > **Note** For services running on `hobbiest` plan service integrations are not supported. :param str resource_name: The name of the resource. :param ServiceIntegrationArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(ServiceIntegrationArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, dashboard_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDashboardUserConfigArgs']]] = None, datadog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDatadogUserConfigArgs']]] = None, destination_endpoint_id: Optional[pulumi.Input[str]] = None, destination_service_name: Optional[pulumi.Input[str]] = None, external_aws_cloudwatch_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]] = None, external_aws_cloudwatch_metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]] = None, external_elasticsearch_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]] = None, external_google_cloud_logging_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]] = None, integration_type: Optional[pulumi.Input[str]] = None, kafka_connect_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaConnectUserConfigArgs']]] = None, kafka_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaLogsUserConfigArgs']]] = None, kafka_mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]] = None, logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationLogsUserConfigArgs']]] = None, m3aggregator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3aggregatorUserConfigArgs']]] = None, m3coordinator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3coordinatorUserConfigArgs']]] = None, metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMetricsUserConfigArgs']]] = None, mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMirrormakerUserConfigArgs']]] = None, project: Optional[pulumi.Input[str]] = None, prometheus_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationPrometheusUserConfigArgs']]] = None, read_replica_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationReadReplicaUserConfigArgs']]] = None, rsyslog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationRsyslogUserConfigArgs']]] = None, schema_registry_proxy_user_config: 
Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]] = None, signalfx_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSignalfxUserConfigArgs']]] = None, source_endpoint_id: Optional[pulumi.Input[str]] = None, source_service_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ServiceIntegrationArgs.__new__(ServiceIntegrationArgs) __props__.__dict__["dashboard_user_config"] = dashboard_user_config __props__.__dict__["datadog_user_config"] = datadog_user_config __props__.__dict__["destination_endpoint_id"] = destination_endpoint_id __props__.__dict__["destination_service_name"] = destination_service_name __props__.__dict__["external_aws_cloudwatch_logs_user_config"] = external_aws_cloudwatch_logs_user_config __props__.__dict__["external_aws_cloudwatch_metrics_user_config"] = external_aws_cloudwatch_metrics_user_config __props__.__dict__["external_elasticsearch_logs_user_config"] = external_elasticsearch_logs_user_config __props__.__dict__["external_google_cloud_logging_user_config"] = external_google_cloud_logging_user_config if integration_type is None and not opts.urn: raise TypeError("Missing required property 'integration_type'") __props__.__dict__["integration_type"] = integration_type __props__.__dict__["kafka_connect_user_config"] = kafka_connect_user_config __props__.__dict__["kafka_logs_user_config"] = kafka_logs_user_config __props__.__dict__["kafka_mirrormaker_user_config"] = kafka_mirrormaker_user_config __props__.__dict__["logs_user_config"] = logs_user_config __props__.__dict__["m3aggregator_user_config"] = m3aggregator_user_config __props__.__dict__["m3coordinator_user_config"] = m3coordinator_user_config __props__.__dict__["metrics_user_config"] = metrics_user_config __props__.__dict__["mirrormaker_user_config"] = mirrormaker_user_config if project is None and not opts.urn: raise TypeError("Missing required property 'project'") __props__.__dict__["project"] = project __props__.__dict__["prometheus_user_config"] = prometheus_user_config __props__.__dict__["read_replica_user_config"] = read_replica_user_config __props__.__dict__["rsyslog_user_config"] = rsyslog_user_config __props__.__dict__["schema_registry_proxy_user_config"] = schema_registry_proxy_user_config __props__.__dict__["signalfx_user_config"] = signalfx_user_config __props__.__dict__["source_endpoint_id"] = source_endpoint_id __props__.__dict__["source_service_name"] = source_service_name super(ServiceIntegration, __self__).__init__( 'aiven:index/serviceIntegration:ServiceIntegration', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, dashboard_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDashboardUserConfigArgs']]] = None, datadog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationDatadogUserConfigArgs']]] = None, destination_endpoint_id: Optional[pulumi.Input[str]] = None, destination_service_name: Optional[pulumi.Input[str]] = None, external_aws_cloudwatch_logs_user_config: 
Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']]] = None, external_aws_cloudwatch_metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']]] = None, external_elasticsearch_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']]] = None, external_google_cloud_logging_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']]] = None, integration_type: Optional[pulumi.Input[str]] = None, kafka_connect_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaConnectUserConfigArgs']]] = None, kafka_logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaLogsUserConfigArgs']]] = None, kafka_mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaMirrormakerUserConfigArgs']]] = None, logs_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationLogsUserConfigArgs']]] = None, m3aggregator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3aggregatorUserConfigArgs']]] = None, m3coordinator_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationM3coordinatorUserConfigArgs']]] = None, metrics_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMetricsUserConfigArgs']]] = None, mirrormaker_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationMirrormakerUserConfigArgs']]] = None, project: Optional[pulumi.Input[str]] = None, prometheus_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationPrometheusUserConfigArgs']]] = None, read_replica_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationReadReplicaUserConfigArgs']]] = None, rsyslog_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationRsyslogUserConfigArgs']]] = None, schema_registry_proxy_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']]] = None, signalfx_user_config: Optional[pulumi.Input[pulumi.InputType['ServiceIntegrationSignalfxUserConfigArgs']]] = None, source_endpoint_id: Optional[pulumi.Input[str]] = None, source_service_name: Optional[pulumi.Input[str]] = None) -> 'ServiceIntegration': """ Get an existing ServiceIntegration resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['ServiceIntegrationDashboardUserConfigArgs']] dashboard_user_config: Dashboard specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationDatadogUserConfigArgs']] datadog_user_config: Dashboard specific user configurable settings :param pulumi.Input[str] destination_endpoint_id: or `destination_service_name` - (Required) identifies the target side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the target needs to be defined using the reference syntax described above to set up the dependency correctly. 
:param pulumi.Input[str] destination_service_name: Destination service for the integration (if any) :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchLogsUserConfigArgs']] external_aws_cloudwatch_logs_user_config: External AWS Cloudwatch logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalAwsCloudwatchMetricsUserConfigArgs']] external_aws_cloudwatch_metrics_user_config: External AWS cloudwatch metrics specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalElasticsearchLogsUserConfigArgs']] external_elasticsearch_logs_user_config: External Elasticsearch logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationExternalGoogleCloudLoggingUserConfigArgs']] external_google_cloud_logging_user_config: External Google Cloud Logging specific user configurable settings :param pulumi.Input[str] integration_type: identifies the type of integration that is set up. Possible values include `dashboard` , `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`, `external_elasticsearch_logs` `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`, `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`, `schema_registry_proxy` and `kafka_mirrormaker`. :param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaConnectUserConfigArgs']] kafka_connect_user_config: Kafka Connect specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaLogsUserConfigArgs']] kafka_logs_user_config: Kafka Logs specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationKafkaMirrormakerUserConfigArgs']] kafka_mirrormaker_user_config: Mirrormaker 2 integration specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationLogsUserConfigArgs']] logs_user_config: Log integration specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationM3aggregatorUserConfigArgs']] m3aggregator_user_config: M3 aggregator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationM3coordinatorUserConfigArgs']] m3coordinator_user_config: M3 coordinator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationMetricsUserConfigArgs']] metrics_user_config: Metrics specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationMirrormakerUserConfigArgs']] mirrormaker_user_config: Mirrormaker 1 integration specific user configurable settings :param pulumi.Input[str] project: defines the project the integration belongs to. 
:param pulumi.Input[pulumi.InputType['ServiceIntegrationPrometheusUserConfigArgs']] prometheus_user_config: Prometheus coordinator specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationReadReplicaUserConfigArgs']] read_replica_user_config: PG Read replica specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationRsyslogUserConfigArgs']] rsyslog_user_config: RSyslog specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationSchemaRegistryProxyUserConfigArgs']] schema_registry_proxy_user_config: Schema registry proxy specific user configurable settings :param pulumi.Input[pulumi.InputType['ServiceIntegrationSignalfxUserConfigArgs']] signalfx_user_config: Signalfx specific user configurable settings :param pulumi.Input[str] source_endpoint_id: or `source_service_name` - (Optional) identifies the source side of the integration. Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or service name ( e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the source needs to be defined using the reference syntax described above to set up the dependency correctly. :param pulumi.Input[str] source_service_name: Source service for the integration (if any) """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _ServiceIntegrationState.__new__(_ServiceIntegrationState) __props__.__dict__["dashboard_user_config"] = dashboard_user_config __props__.__dict__["datadog_user_config"] = datadog_user_config __props__.__dict__["destination_endpoint_id"] = destination_endpoint_id __props__.__dict__["destination_service_name"] = destination_service_name __props__.__dict__["external_aws_cloudwatch_logs_user_config"] = external_aws_cloudwatch_logs_user_config __props__.__dict__["external_aws_cloudwatch_metrics_user_config"] = external_aws_cloudwatch_metrics_user_config __props__.__dict__["external_elasticsearch_logs_user_config"] = external_elasticsearch_logs_user_config __props__.__dict__["external_google_cloud_logging_user_config"] = external_google_cloud_logging_user_config __props__.__dict__["integration_type"] = integration_type __props__.__dict__["kafka_connect_user_config"] = kafka_connect_user_config __props__.__dict__["kafka_logs_user_config"] = kafka_logs_user_config __props__.__dict__["kafka_mirrormaker_user_config"] = kafka_mirrormaker_user_config __props__.__dict__["logs_user_config"] = logs_user_config __props__.__dict__["m3aggregator_user_config"] = m3aggregator_user_config __props__.__dict__["m3coordinator_user_config"] = m3coordinator_user_config __props__.__dict__["metrics_user_config"] = metrics_user_config __props__.__dict__["mirrormaker_user_config"] = mirrormaker_user_config __props__.__dict__["project"] = project __props__.__dict__["prometheus_user_config"] = prometheus_user_config __props__.__dict__["read_replica_user_config"] = read_replica_user_config __props__.__dict__["rsyslog_user_config"] = rsyslog_user_config __props__.__dict__["schema_registry_proxy_user_config"] = schema_registry_proxy_user_config __props__.__dict__["signalfx_user_config"] = signalfx_user_config __props__.__dict__["source_endpoint_id"] = source_endpoint_id __props__.__dict__["source_service_name"] = source_service_name return ServiceIntegration(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="dashboardUserConfig") def dashboard_user_config(self) -> 
pulumi.Output[Optional['outputs.ServiceIntegrationDashboardUserConfig']]:
        """
        Dashboard specific user configurable settings
        """
        return pulumi.get(self, "dashboard_user_config")

    @property
    @pulumi.getter(name="datadogUserConfig")
    def datadog_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationDatadogUserConfig']]:
        """
        Datadog specific user configurable settings
        """
        return pulumi.get(self, "datadog_user_config")

    @property
    @pulumi.getter(name="destinationEndpointId")
    def destination_endpoint_id(self) -> pulumi.Output[Optional[str]]:
        """
        or `destination_service_name` - (Required) identifies the target side of the integration.
        Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or
        service name (e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the
        target needs to be defined using the reference syntax described above to set up the
        dependency correctly.
        """
        return pulumi.get(self, "destination_endpoint_id")

    @property
    @pulumi.getter(name="destinationServiceName")
    def destination_service_name(self) -> pulumi.Output[Optional[str]]:
        """
        Destination service for the integration (if any)
        """
        return pulumi.get(self, "destination_service_name")

    @property
    @pulumi.getter(name="externalAwsCloudwatchLogsUserConfig")
    def external_aws_cloudwatch_logs_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationExternalAwsCloudwatchLogsUserConfig']]:
        """
        External AWS Cloudwatch logs specific user configurable settings
        """
        return pulumi.get(self, "external_aws_cloudwatch_logs_user_config")

    @property
    @pulumi.getter(name="externalAwsCloudwatchMetricsUserConfig")
    def external_aws_cloudwatch_metrics_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationExternalAwsCloudwatchMetricsUserConfig']]:
        """
        External AWS cloudwatch metrics specific user configurable settings
        """
        return pulumi.get(self, "external_aws_cloudwatch_metrics_user_config")

    @property
    @pulumi.getter(name="externalElasticsearchLogsUserConfig")
    def external_elasticsearch_logs_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationExternalElasticsearchLogsUserConfig']]:
        """
        External Elasticsearch logs specific user configurable settings
        """
        return pulumi.get(self, "external_elasticsearch_logs_user_config")

    @property
    @pulumi.getter(name="externalGoogleCloudLoggingUserConfig")
    def external_google_cloud_logging_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationExternalGoogleCloudLoggingUserConfig']]:
        """
        External Google Cloud Logging specific user configurable settings
        """
        return pulumi.get(self, "external_google_cloud_logging_user_config")

    @property
    @pulumi.getter(name="integrationType")
    def integration_type(self) -> pulumi.Output[str]:
        """
        identifies the type of integration that is set up. Possible values include `dashboard`,
        `datadog`, `logs`, `metrics`, `kafka_connect`, `external_google_cloud_logging`,
        `external_elasticsearch_logs`, `external_aws_cloudwatch_logs`, `read_replica`, `rsyslog`,
        `signalfx`, `kafka_logs`, `m3aggregator`, `m3coordinator`, `prometheus`,
        `schema_registry_proxy` and `kafka_mirrormaker`.
        """
        return pulumi.get(self, "integration_type")

    @property
    @pulumi.getter(name="kafkaConnectUserConfig")
    def kafka_connect_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationKafkaConnectUserConfig']]:
        """
        Kafka Connect specific user configurable settings
        """
        return pulumi.get(self, "kafka_connect_user_config")

    @property
    @pulumi.getter(name="kafkaLogsUserConfig")
    def kafka_logs_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationKafkaLogsUserConfig']]:
        """
        Kafka Logs specific user configurable settings
        """
        return pulumi.get(self, "kafka_logs_user_config")

    @property
    @pulumi.getter(name="kafkaMirrormakerUserConfig")
    def kafka_mirrormaker_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationKafkaMirrormakerUserConfig']]:
        """
        Mirrormaker 2 integration specific user configurable settings
        """
        return pulumi.get(self, "kafka_mirrormaker_user_config")

    @property
    @pulumi.getter(name="logsUserConfig")
    def logs_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationLogsUserConfig']]:
        """
        Log integration specific user configurable settings
        """
        return pulumi.get(self, "logs_user_config")

    @property
    @pulumi.getter(name="m3aggregatorUserConfig")
    def m3aggregator_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationM3aggregatorUserConfig']]:
        """
        M3 aggregator specific user configurable settings
        """
        return pulumi.get(self, "m3aggregator_user_config")

    @property
    @pulumi.getter(name="m3coordinatorUserConfig")
    def m3coordinator_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationM3coordinatorUserConfig']]:
        """
        M3 coordinator specific user configurable settings
        """
        return pulumi.get(self, "m3coordinator_user_config")

    @property
    @pulumi.getter(name="metricsUserConfig")
    def metrics_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationMetricsUserConfig']]:
        """
        Metrics specific user configurable settings
        """
        return pulumi.get(self, "metrics_user_config")

    @property
    @pulumi.getter(name="mirrormakerUserConfig")
    def mirrormaker_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationMirrormakerUserConfig']]:
        """
        Mirrormaker 1 integration specific user configurable settings
        """
        return pulumi.get(self, "mirrormaker_user_config")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        defines the project the integration belongs to.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter(name="prometheusUserConfig")
    def prometheus_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationPrometheusUserConfig']]:
        """
        Prometheus coordinator specific user configurable settings
        """
        return pulumi.get(self, "prometheus_user_config")

    @property
    @pulumi.getter(name="readReplicaUserConfig")
    def read_replica_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationReadReplicaUserConfig']]:
        """
        PG Read replica specific user configurable settings
        """
        return pulumi.get(self, "read_replica_user_config")

    @property
    @pulumi.getter(name="rsyslogUserConfig")
    def rsyslog_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationRsyslogUserConfig']]:
        """
        RSyslog specific user configurable settings
        """
        return pulumi.get(self, "rsyslog_user_config")

    @property
    @pulumi.getter(name="schemaRegistryProxyUserConfig")
    def schema_registry_proxy_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationSchemaRegistryProxyUserConfig']]:
        """
        Schema registry proxy specific user configurable settings
        """
        return pulumi.get(self, "schema_registry_proxy_user_config")

    @property
    @pulumi.getter(name="signalfxUserConfig")
    def signalfx_user_config(self) -> pulumi.Output[Optional['outputs.ServiceIntegrationSignalfxUserConfig']]:
        """
        Signalfx specific user configurable settings
        """
        return pulumi.get(self, "signalfx_user_config")

    @property
    @pulumi.getter(name="sourceEndpointId")
    def source_endpoint_id(self) -> pulumi.Output[Optional[str]]:
        """
        or `source_service_name` - (Optional) identifies the source side of the integration.
        Only either endpoint identifier (e.g. `aiven_service_integration_endpoint.XXX.id`) or
        service name (e.g. `aiven_kafka.XXX.service_name`) must be specified. In either case the
        source needs to be defined using the reference syntax described above to set up the
        dependency correctly.
        """
        return pulumi.get(self, "source_endpoint_id")

    @property
    @pulumi.getter(name="sourceServiceName")
    def source_service_name(self) -> pulumi.Output[Optional[str]]:
        """
        Source service for the integration (if any)
        """
        return pulumi.get(self, "source_service_name")
64.366979
222
0.746262
8,918
89,277
7.13983
0.03207
0.092504
0.06535
0.066842
0.959041
0.956481
0.955413
0.946461
0.941639
0.940524
0
0.001879
0.171186
89,277
1,386
223
64.41342
0.858637
0.318021
0
0.894402
1
0
0.282325
0.243725
0
0
0
0
0
1
0.167939
false
0.001272
0.008906
0
0.277354
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
69b3a0c174016f62c5a1487e31360c2d4f2ec634
1,030
py
Python
TrainningApp/market/models.py
JacoboZabala/Frameworks_7A_2020B
a8834666551a6d615b94d1d38ddd1997781faaef
[ "MIT" ]
null
null
null
TrainningApp/market/models.py
JacoboZabala/Frameworks_7A_2020B
a8834666551a6d615b94d1d38ddd1997781faaef
[ "MIT" ]
null
null
null
TrainningApp/market/models.py
JacoboZabala/Frameworks_7A_2020B
a8834666551a6d615b94d1d38ddd1997781faaef
[ "MIT" ]
null
null
null
from django.db import models

# Create your models here.
class Category(models.Model):
    code = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    status = models.BooleanField(default=True)
    create_date = models.DateTimeField('Date creation')
    update_date = models.DateTimeField('Date update')


class Seller(models.Model):
    code = models.CharField(max_length=1000)
    name = models.CharField(max_length=100)
    # max_length is not a valid IntegerField option in Django (it is flagged
    # by the system checks), so it is dropped here.
    age = models.IntegerField()


class Buyers(models.Model):
    code = models.CharField(max_length=1000)
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    city = models.CharField(max_length=50)


class Admin(models.Model):
    code = models.CharField(max_length=1000)
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    status = models.BooleanField(default=True)
    create_date = models.DateTimeField('Date creation')
    update_date = models.DateTimeField('Date update')
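A minimal sketch of how these models might be exercised, assuming the app is installed under the label `market` (inferred from the file path above) and migrations have been applied; the field values are illustrative:

```python
# Illustrative only: assumes `market` is in INSTALLED_APPS and migrations ran.
from django.utils import timezone
from market.models import Category

now = timezone.now()
category = Category.objects.create(
    code="CAT-001", name="Electronics",
    create_date=now, update_date=now,
)
# DateTimeField here has no auto_now/auto_now_add, so both dates must be passed.
print(Category.objects.filter(status=True).count())
```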
35.517241
59
0.733981
131
1,030
5.648855
0.259542
0.145946
0.218919
0.291892
0.839189
0.835135
0.793243
0.740541
0.740541
0.740541
0
0.040276
0.156311
1,030
29
59
35.517241
0.811277
0.023301
0
0.695652
0
0
0.047761
0
0
0
0
0
0
1
0
false
0
0.043478
0
1
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
9
38853f33b344b4b4b265da7dac34ccf6390e7d8f
269
py
Python
ex42.py
phyu-lay/python_exercises
fa0b81b20f445b27d4460534caf7ef5137b07c63
[ "MIT" ]
null
null
null
ex42.py
phyu-lay/python_exercises
fa0b81b20f445b27d4460534caf7ef5137b07c63
[ "MIT" ]
null
null
null
ex42.py
phyu-lay/python_exercises
fa0b81b20f445b27d4460534caf7ef5137b07c63
[ "MIT" ]
null
null
null
class Animal(object):
    pass


class Dog(Animal):
    # `def__init__` in the original is a syntax error; Python requires a space
    # between the `def` keyword and the method name.
    def __init__(self, name):
        self.name = name


class Cat(Animal):
    def __init__(self, name):
        self.name = name


class Person(object):
    def __init__(self, name):
        self.name = name
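A short usage sketch (the instance names are illustrative) showing the inheritance these classes encode:

```python
# Dog and Cat are Animals; Person is a plain object subclass.
rex = Dog("Rex")
whiskers = Cat("Whiskers")
mary = Person("Mary")
print(isinstance(rex, Animal), isinstance(mary, Animal))  # True False
```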
22.416667
36
0.591078
34
269
4.323529
0.323529
0.326531
0.22449
0.306122
0.70068
0.70068
0.70068
0.517007
0.517007
0
0
0
0.297398
269
11
37
24.454545
0.777778
0
0
0.545455
0
0
0
0
0
0
0
0
0
0
null
null
0.090909
0
null
null
0
0
0
0
null
1
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
7
38867eb361c12c0ebd4864ec423ecfec69bec8e7
13,693
py
Python
matbal/mbal.py
jikutlenova/pyreservoir
fb5bb5f408265da060859550ec16daf3b7a1c543
[ "MIT" ]
1
2020-09-18T14:33:14.000Z
2020-09-18T14:33:14.000Z
matbal/mbal.py
jikutlenova/pyreservoir
fb5bb5f408265da060859550ec16daf3b7a1c543
[ "MIT" ]
null
null
null
matbal/mbal.py
jikutlenova/pyreservoir
fb5bb5f408265da060859550ec16daf3b7a1c543
[ "MIT" ]
null
null
null
""" Material Balance Plots @author: Yohanes Nuwara @email: ign.nuwara97@gmail.com """ class drygas(): """ Dry-Gas Material Balance Plot """ def calculate_params(self, p, Bg, Gp, cf, cw, swi): """Calculate Material Balance Paramaters for Dry-Gas Reservoir""" import numpy as np pi = p[0] Bgi = Bg[0] # total gas FVF equals the gas FVF itself (for dry-gas) Btg = Bg # calculate Efw Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p) F = []; Eg = [] for i in range(len(p)): F_ = Bg[i] * Gp[i] Eg_ = Btg[i] - Bgi F.append(F_); Eg.append(Eg_) F = np.array(F); Eg = np.array(Eg) return F, Btg, Efw, Eg def plot(self, p, z, Gp, F, Btg, Efw, Eg): """Create Material Balance Plots for Dry-Gas Reservoir""" import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit # plot attributes title_size = 12 title_pad = 10 # linear function for curve-fit def linear_zero_intercept(x, m): y = m * x return y def linear_with_intercept(x, m, c): y = m * x + c return y # Plot 1: F vs Eg plt.subplot(3,2,1) x1, y1 = Eg, F plt.plot(x1, y1, '.-') plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad) plt.xlabel('Eg (RB/scf)') plt.ylabel('F (res ft3)') ## curve-fitting to calculate the slope as OGIP x1_norm = x1 / max(x1) # normalize x y1_norm = y1 / max(y1) # normalize y popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm) m = popt[0] Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP ## plot the regression line x1_fit = np.linspace(min(x1), max(x1), 5) y1_fit = linear_zero_intercept(x1_fit, Gfgi) plt.plot(x1_fit, y1_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3))) plt.legend() # Plot 2: p/z vs Gp plt.subplot(3,2,2) x2, y2 = Gp, (p / z) plt.plot(x2, y2, '.-') plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('p/z (psia)') ## curve-fitting to calculate the slope as OGIP x2_norm = x2 / max(x2) # normalize x y2_norm = y2 / max(y2) # normalize y popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm) m, c = popt[0], popt[1] Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized m = m * max(y2) / max(x2) # denormalize the slope c = c * max(y2) # denormalize the intercept ## plot the regression line x2_fit = np.linspace(min(x2), max(x2), 5) y2_fit = linear_with_intercept(x2_fit, m, c) plt.plot(x2_fit, y2_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3))) plt.legend() # Plot 3: F/Eg vs Gp plt.subplot(3,2,3) x3, y3 = Gp, (F / Eg) plt.plot(x3, y3, '.-') plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('F/Eg (scf)') ## curve-fitting to calculate the slope as OGIP, here [1:] because NaN is removed x3_norm = x3[1:] / max(x3[1:]) # normalize x y3_norm = y3[1:] / max(y3[1:]) # normalize y popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm) m, c = popt[0], popt[1] m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP ## plot the regression line x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5) y3_fit = linear_with_intercept(x3_fit, m, Gfgi) plt.plot(x3_fit, y3_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3))) plt.legend() # Plot 6: F vs (Eg+Bgi*Efw) plt.subplot(3,2,4) Bgi = Btg[0] x6, y6 = (Eg + Bgi * Efw), F plt.plot(x6, y6, '.-') plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad) plt.xlabel('Eg+Bgi*Efw (res ft3/scf)') plt.ylabel('F (res ft3)') ## curve-fitting to calculate the slope as OGIP x6_norm = x6 / max(x6) # 
normalize x y6_norm = y6 / max(y6) # normalize y popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm) m = popt[0] Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP ## plot the regression line x6_fit = np.linspace(min(x6), max(x6), 5) y6_fit = linear_zero_intercept(x6_fit, Gfgi) plt.plot(x6_fit, y6_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3))) plt.legend() # Plot 7: ((p/z)*(1-Efw)) vs Gp plt.subplot(3,2,5) x7, y7 = Gp, ((p / z) * (1 - Efw)) plt.plot(x7, y7, '.-') plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('(p/z)*(1-Efw) (psia)') ## curve-fitting to calculate the slope as OGIP x7_norm = x7 / max(x7) # normalize x y7_norm = y7 / max(y7) # normalize y popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm) m, c = popt[0], popt[1] Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized m = m * max(y7) / max(x7) # denormalize the slope c = c * max(y7) # denormalize the intercept ## plot the regression line x7_fit = np.linspace(min(x7), max(x7), 5) y7_fit = linear_with_intercept(x7_fit, m, c) plt.plot(x7_fit, y7_fit, label='{} MMSCF'.format(np.round(Gfgi * 1E-6, 3))) plt.legend() plt.tight_layout(pad=1.5) plt.show() return F, Eg, Efw class gascondensate(): """ Gas-Condensate Material Balance Plot """ def calculate_params(self, p, pdew, Bg, Bo, Np, Gp, Gi, cf, cw, swi, Rs, Rv): """Calculate Material Balance Paramaters for Gas-Condensate Reservoir""" import numpy as np pi = p[0] Rvi = Rv[0] Bgi = Bg[0] # calculate Efw Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p) # calculate F and Btg F = []; Btg = []; Eg = [] for i in range(len(p)): if p[i] >= pdew: # gas-condensate above dewpoint pressure F_ = Bg[i] * Gp[i] Btg_ = Bg[i] Eg_ = Btg_ - Bgi if p[i] < pdew: # gas-condensate below dewpoint pressure F_ = (Np[i] * ((Bo[i] - (Rs[i] * Bg[i])) / (1 - (Rv[i] * Rs[i])))) + ((Gp[i] - Gi[i]) * ((Bg[i] - (Rv[i] * Bo[i])) / (1 - (Rv[i] * Rs[i])))) Btg_ = ((Bg[i] * (1 - (Rs[i] * Rvi))) + (Bo[i] * (Rvi - Rv[i]))) / (1 - (Rv[i] * Rs[i])) # in RB/STB Eg_ = Btg_ - Bgi F.append(F_); Btg.append(Btg_); Eg.append(Eg_) F, Btg, Eg = np.array(F), np.array(Btg), np.array(Eg) return F, Btg, Efw, Eg def plot(self, p, z, Gp, F, Btg, Efw, Eg, Rv): """Create Material Balance Plots for Dry-Gas Reservoir""" import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit def calculate_condensate_inplace(Gfgi, Rv): """Calculate initial condensate-in-place from the calculated OGIP""" Rvi = Rv[0] condensate_inplace = Rvi * Gfgi # in STB return condensate_inplace # plot attributes title_size = 12 title_pad = 10 # linear function for curve-fit def linear_zero_intercept(x, m): y = m * x return y def linear_with_intercept(x, m, c): y = m * x + c return y # Plot 1: F vs Eg plt.subplot(3,2,1) x1, y1 = Eg, F plt.plot(x1, y1, '.-') plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad) plt.xlabel('Eg (RB/scf)') plt.ylabel('F (res ft3)') ## curve-fitting to calculate the slope as OGIP x1_norm = x1 / max(x1) # normalize x y1_norm = y1 / max(y1) # normalize y popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm) m = popt[0] Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP ## calculate condensate-in-place condensate_inplace = calculate_condensate_inplace(Gfgi, Rv) ## plot the regression line x1_fit = np.linspace(min(x1), max(x1), 5) y1_fit = linear_zero_intercept(x1_fit, Gfgi) plt.plot(x1_fit, y1_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi 
* 1E-6, 3), np.round(condensate_inplace * 1E-3, 3))) plt.legend() # Plot 2: p/z vs Gp plt.subplot(3,2,2) plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('p/z (psia)') if np.all(z==0) == False: x2, y2 = Gp, (p / z) plt.plot(x2, y2, '.-') ## curve-fitting to calculate the slope as OGIP x2_norm = x2 / max(x2) # normalize x y2_norm = y2 / max(y2) # normalize y popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm) m, c = popt[0], popt[1] Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized m = m * max(y2) / max(x2) # denormalize the slope c = c * max(y2) # denormalize the intercept ## calculate condensate-in-place condensate_inplace = calculate_condensate_inplace(Gfgi, Rv) ## plot the regression line x2_fit = np.linspace(min(x2), max(x2), 5) y2_fit = linear_with_intercept(x2_fit, m, c) plt.plot(x2_fit, y2_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3))) plt.legend() # Plot 3: F/Eg vs Gp plt.subplot(3,2,3) x3, y3 = Gp, (F / Eg) plt.plot(x3, y3, '.-') plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('F/Eg (scf)') ## curve-fitting to calculate the slope as OGIP, here [1:] because NaN is removed x3_norm = x3[1:] / max(x3[1:]) # normalize x y3_norm = y3[1:] / max(y3[1:]) # normalize y popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm) m, c = popt[0], popt[1] m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP ## calculate condensate-in-place condensate_inplace = calculate_condensate_inplace(Gfgi, Rv) ## plot the regression line x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5) y3_fit = linear_with_intercept(x3_fit, m, Gfgi) plt.plot(x3_fit, y3_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3))) plt.legend() # Plot 6: F vs (Eg+Bgi*Efw) plt.subplot(3,2,4) Bgi = Btg[0] x6, y6 = (Eg + Bgi * Efw), F plt.plot(x6, y6, '.-') plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad) plt.xlabel('Eg+Bgi*Efw (res ft3/scf)') plt.ylabel('F (res ft3)') ## curve-fitting to calculate the slope as OGIP x6_norm = x6 / max(x6) # normalize x y6_norm = y6 / max(y6) # normalize y popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm) m = popt[0] Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP ## calculate condensate-in-place condensate_inplace = calculate_condensate_inplace(Gfgi, Rv) ## plot the regression line x6_fit = np.linspace(min(x6), max(x6), 5) y6_fit = linear_zero_intercept(x6_fit, Gfgi) plt.plot(x6_fit, y6_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3))) plt.legend() # Plot 7: ((p/z)*(1-Efw)) vs Gp plt.subplot(3,2,5) plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad) plt.xlabel('Gp (scf)') plt.ylabel('(p/z)*(1-Efw) (psia)') if np.all(z==0) == False: x7, y7 = Gp, ((p / z) * (1 - Efw)) plt.plot(x7, y7, '.-') ## curve-fitting to calculate the slope as OGIP x7_norm = x7 / max(x7) # normalize x y7_norm = y7 / max(y7) # normalize y popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm) m, c = popt[0], popt[1] Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized m = m * max(y7) / max(x7) # denormalize the slope c = c * max(y7) # denormalize the intercept ## calculate condensate-in-place condensate_inplace = 
calculate_condensate_inplace(Gfgi, Rv) ## plot the regression line x7_fit = np.linspace(min(x7), max(x7), 5) y7_fit = linear_with_intercept(x7_fit, m, c) plt.plot(x7_fit, y7_fit, label='(G) {} MMSCF (C) {} MSTB'.format(np.round(Gfgi * 1E-6, 3), np.round(condensate_inplace * 1E-3, 3))) plt.legend() plt.tight_layout(pad=1.5) plt.show() return F, Eg, Efw
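A hedged usage sketch for the dry-gas class above; the pressure, FVF, and production numbers are synthetic, chosen only to show the calling convention, not a real reservoir:

```python
import numpy as np

# Synthetic inputs: pressures (psia), gas FVF, and cumulative gas produced (scf).
p = np.array([3000.0, 2800.0, 2600.0, 2400.0])
Bg = np.array([0.0010, 0.0011, 0.0012, 0.0013])
Gp = np.array([0.0, 1.0e8, 2.1e8, 3.3e8])

dg = drygas()
# cf, cw in 1/psi; swi is a fraction. All values here are illustrative.
F, Btg, Efw, Eg = dg.calculate_params(p, Bg, Gp, cf=4e-6, cw=3e-6, swi=0.25)
print(F, Eg)
```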
36.80914
156
0.537063
2,048
13,693
3.484863
0.078613
0.019616
0.037271
0.03699
0.919854
0.903461
0.894213
0.885526
0.864649
0.859325
0
0.043353
0.317754
13,693
371
157
36.908356
0.720617
0.202731
0
0.864979
0
0
0.061622
0
0
0
0
0
0
1
0.037975
false
0
0.033755
0
0.118143
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
38ba8bb6b06a6d996f1843b5202cbf63291bc394
111
py
Python
autorop/libc/__init__.py
mariuszskon/autorop
5735073008f722fab00f3866ef4a05f04620593b
[ "MIT" ]
15
2020-10-03T05:20:31.000Z
2022-03-20T06:19:29.000Z
autorop/libc/__init__.py
mariuszskon/autorop
5735073008f722fab00f3866ef4a05f04620593b
[ "MIT" ]
8
2020-10-02T09:51:39.000Z
2021-04-24T03:14:18.000Z
autorop/libc/__init__.py
mariuszskon/autorop
5735073008f722fab00f3866ef4a05f04620593b
[ "MIT" ]
2
2021-04-16T06:33:49.000Z
2021-09-03T09:21:10.000Z
from autorop.libc.Auto import Auto
from autorop.libc.Rip import Rip
from autorop.libc.Database import Database
27.75
42
0.837838
18
111
5.166667
0.388889
0.354839
0.483871
0
0
0
0
0
0
0
0
0
0.108108
111
3
43
37
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
38c6640d6c476c2dc2cc5d403e5d2036266a3999
2,666
py
Python
examples/torch_graph.py
eddy-ilg/itypes
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
[ "MIT" ]
null
null
null
examples/torch_graph.py
eddy-ilg/itypes
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
[ "MIT" ]
null
null
null
examples/torch_graph.py
eddy-ilg/itypes
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import torch
from copy import copy, deepcopy

#
# This example illustrates when torch builds a gradient graph,
# how it can be prevented, and when torch shares or copies tensor
# data

def print_sep():
    print()
    print("---------------------------------------------------------------")
    print()

print_sep()

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
print(f'{"x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True):":55}', x)

y = x.clone()
print(f'{"y = x.clone():":55}', y)

z = y.detach()
print(f'{"z = y.detach():":55}', z)

print()
print("x[1] = 0 is not possible because x is a leaf variable that requires gradient.")
print("Note that z does not require a gradient as it was detached.")
print()

y[1] = 0
print("After y[1] = 0:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)
print(f'{"z = ":55}', z)
print()

z[1] = 5
print("After z[1] = 5:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)
print(f'{"z = ":55}', z)

print_sep()

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
print(f'{"x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True):":55}', x)

y = copy(x)
print(f'{"y = copy(x):":55}', y)

print()
print("x[1] = 0 is not possible because x is a leaf variable that requires gradient.")
print("y[1] = 0 is not possible because y is a leaf variable that requires gradient.")
print("Note that y is a second leaf node (not a child of x as previously).")

print_sep()

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
print(f'{"x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True):":55}', x)

y = deepcopy(x)
print(f'{"y = deepcopy(x):":55}', y)

print()
print("x[1] = 0 is not possible because x is a leaf variable that requires gradient.")
print("y[1] = 0 is not possible because y is a leaf variable that requires gradient.")
print("Note that y is a second leaf node (not a child of x as previously).")

print_sep()

x = torch.tensor([1.0, 2.0, 3.0])
print("requires_grad = False by default")
print()
print(f'{"x = torch.tensor([1.0, 2.0, 3.0]):":55}', x)

y = copy(x)
print(f'{"y = copy(x):":55}', y)

print()
x[1] = 0
print("After x[1] = 0:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)
print()

y[1] = 5
print("After y[1] = 5:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)

print_sep()

x = torch.tensor([1.0, 2.0, 3.0])
print("requires_grad = False by default")
print()
print(f'{"x = torch.tensor([1.0, 2.0, 3.0]):":55}', x)

y = deepcopy(x)
print(f'{"y = deepcopy(x):":55}', y)

print()
x[1] = 0
print("After x[1] = 0:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)
print()

y[1] = 5
print("After y[1] = 5:")
print(f'{"x = ":55}', x)
print(f'{"y = ":55}', y)

print_sep()
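A hedged companion sketch, not part of the original example, making the same sharing/graph semantics checkable with standard PyTorch calls (`data_ptr`, `requires_grad`, `torch.no_grad`):

```python
import torch

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)

# clone() copies storage but stays on the graph; detach() shares storage.
y = x.clone()
z = y.detach()
print(y.data_ptr() == x.data_ptr())  # False: clone copies the data
print(z.data_ptr() == y.data_ptr())  # True: detach shares the data

# no_grad() is another way to prevent graph building for a region of code.
with torch.no_grad():
    w = x * 2
print(w.requires_grad)  # False: no graph was recorded
```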
23.803571
86
0.582146
505
2,666
3.043564
0.134653
0.097593
0.050098
0.08458
0.802212
0.802212
0.802212
0.802212
0.802212
0.802212
0
0.064617
0.15829
2,666
111
87
24.018018
0.620321
0.056639
0
0.839506
0
0.061728
0.534476
0.055002
0
0
0
0
0
1
0.012346
false
0
0.024691
0
0.037037
0.765432
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
8
38d1e1bd8c199243464644f193cf71c3f3a73a56
255
py
Python
hxlm/plugin/xa_humanitarian/sensitive.py
EticaAI/HXL-Data-Science-file-formats
c7c5aa56c452ac1613242ee04cc9ae66f38ec24d
[ "Unlicense" ]
3
2021-01-25T20:44:10.000Z
2021-04-19T22:47:05.000Z
hxlm/plugin/xa_humanitarian/sensitive.py
fititnt/HXL-Data-Science-file-formats
f4fe9866e53280767f9cb4c8c488ef9c8b9d33cd
[ "Unlicense" ]
24
2021-01-26T00:36:39.000Z
2021-11-13T23:59:56.000Z
hxlm/plugin/xa_humanitarian/sensitive.py
fititnt/HXL-Data-Science-file-formats
f4fe9866e53280767f9cb4c8c488ef9c8b9d33cd
[ "Unlicense" ]
1
2021-09-05T03:43:37.000Z
2021-09-05T03:43:37.000Z
# TODO: https://centre.humdata.org/introducing-the-working-draft-of-the-ocha-data-responsibility-guidelines/
# TODO: https://centre.humdata.org/wp-content/uploads/2019/03/OCHA-DR-Guidelines-working-draft-032019.pdf


def hello():
    print('hello')
36.428571
109
0.745098
36
255
5.277778
0.694444
0.094737
0.157895
0.231579
0.263158
0
0
0
0
0
0
0.051282
0.082353
255
6
110
42.5
0.760684
0.823529
0
0
0
0
0.138889
0
0
0
0
0.166667
0
1
0.5
true
0
0
0
0.5
0.5
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
1
1
0
0
0
0
1
0
7
2a25246f0dac17e0dec42ba3cc43cbb358d4ac4a
411
py
Python
Platforms/Web/Processing/Api/Account/__init__.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
2
2017-09-14T08:07:55.000Z
2021-05-18T05:05:05.000Z
Platforms/Web/Processing/Api/Account/__init__.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
111
2018-04-15T14:32:14.000Z
2021-03-28T21:06:29.000Z
Platforms/Web/Processing/Api/Account/__init__.py
The-CJ/Phaazebot
83a9563d210718071d4e2cdcca3b212c87abaf51
[ "MIT" ]
1
2018-04-15T13:24:44.000Z
2018-04-15T13:24:44.000Z
import Platforms.Web.Processing.Api.Account.create as create
import Platforms.Web.Processing.Api.Account.edit as edit
import Platforms.Web.Processing.Api.Account.errors as errors
import Platforms.Web.Processing.Api.Account.get as get
import Platforms.Web.Processing.Api.Account.login as login
import Platforms.Web.Processing.Api.Account.logout as logout
import Platforms.Web.Processing.Api.Account.main as main
51.375
60
0.846715
63
411
5.52381
0.222222
0.301724
0.362069
0.563218
0.764368
0.764368
0
0
0
0
0
0
0.068127
411
7
61
58.714286
0.908616
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
2a2e6485addcac2bdd8fee6271f68d9167db181b
117
py
Python
checkov/terraform/checks/data/__init__.py
peaudecastor/checkov
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
[ "Apache-2.0" ]
null
null
null
checkov/terraform/checks/data/__init__.py
peaudecastor/checkov
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
[ "Apache-2.0" ]
null
null
null
checkov/terraform/checks/data/__init__.py
peaudecastor/checkov
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
[ "Apache-2.0" ]
null
null
null
from checkov.terraform.checks.data.aws import *  # noqa
from checkov.terraform.checks.data.external import *  # noqa
39
60
0.777778
16
117
5.6875
0.5625
0.241758
0.43956
0.571429
0.659341
0
0
0
0
0
0
0
0.119658
117
2
61
58.5
0.883495
0.076923
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
aac86b7b4c7c973276391b5bba878d1583b589b1
3,681
py
Python
src/data_match/parse_test.py
Verdex/dross
9fe0c839d3e89d343be418c7154bb665bfd75225
[ "MIT" ]
null
null
null
src/data_match/parse_test.py
Verdex/dross
9fe0c839d3e89d343be418c7154bb665bfd75225
[ "MIT" ]
null
null
null
src/data_match/parse_test.py
Verdex/dross
9fe0c839d3e89d343be418c7154bb665bfd75225
[ "MIT" ]
null
null
null
import unittest

from .matching import *
from .parse import *


class ParseTest(unittest.TestCase):

    def test_parse_matcher_should_generate_match_any_string(self):
        output = parse_matcher(".").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is MatchAnyString)

    def test_parse_matcher_should_generate_match_until_end(self):
        output = parse_matcher("*").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is MatchUntilEnd)

    def test_parse_matcher_should_generate_capture_string(self):
        output = parse_matcher("String").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is CaptureString)
        self.assertEqual(output[0].capture_name, "String")

    def test_parse_matcher_should_generate_match_string_with_value(self):
        output = parse_matcher("string").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is MatchStringWithValue)
        self.assertEqual(output[0].value, "string")

    def test_parse_matcher_should_generate_match_data_with_name(self):
        output = parse_matcher("string()").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is MatchDataWithName)
        self.assertEqual(output[0].target_name, "string")

    def test_parse_matcher_should_generate_capture_data(self):
        output = parse_matcher("String()").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is CaptureData)
        self.assertEqual(output[0].capture_name, "String")

    def test_parse_matcher_should_generate_capture_data_with_sub_matchers(self):
        output = parse_matcher("String(a, ., X(), a(), *)").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is CaptureData)
        self.assertEqual(output[0].capture_name, "String")
        self.assertEqual(len(output[0].sub_matchers), 5)
        self.assertTrue(type(output[0].sub_matchers[0]) is MatchStringWithValue)
        self.assertTrue(type(output[0].sub_matchers[1]) is MatchAnyString)
        self.assertTrue(type(output[0].sub_matchers[2]) is CaptureData)
        self.assertTrue(type(output[0].sub_matchers[3]) is MatchDataWithName)
        self.assertTrue(type(output[0].sub_matchers[4]) is MatchUntilEnd)

    def test_parse_matcher_should_generate_match_data_with_name_with_sub_matchers(self):
        output = parse_matcher("string(a, ., X(), a(), *)").matchers
        self.assertEqual(len(output), 1)
        self.assertTrue(type(output[0]) is MatchDataWithName)
        self.assertEqual(output[0].target_name, "string")
        self.assertEqual(len(output[0].sub_matchers), 5)
        self.assertTrue(type(output[0].sub_matchers[0]) is MatchStringWithValue)
        self.assertTrue(type(output[0].sub_matchers[1]) is MatchAnyString)
        self.assertTrue(type(output[0].sub_matchers[2]) is CaptureData)
        self.assertTrue(type(output[0].sub_matchers[3]) is MatchDataWithName)
        self.assertTrue(type(output[0].sub_matchers[4]) is MatchUntilEnd)

    def test_parse_matcher_should_parse_multiple_matchers(self):
        output = parse_matcher("string(1,2,3), ., string, String, *").matchers
        self.assertEqual(len(output), 5)
        self.assertTrue(type(output[0]) is MatchDataWithName)
        self.assertTrue(type(output[1]) is MatchAnyString)
        self.assertTrue(type(output[2]) is MatchStringWithValue)
        self.assertTrue(type(output[3]) is CaptureString)
        self.assertTrue(type(output[4]) is MatchUntilEnd)


if __name__ == '__main__':
    unittest.main()
48.434211
88
0.706058
459
3,681
5.440087
0.108932
0.075691
0.165799
0.221065
0.888666
0.883861
0.827393
0.777733
0.735683
0.735683
0
0.018015
0.170606
3,681
76
89
48.434211
0.799869
0
0
0.46875
1
0
0.043183
0
0
0
0
0
0.625
1
0.140625
false
0
0.046875
0
0.203125
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
2afc12b718ee5320ea23384f5dbd0f8e8c3cc35c
48,665
py
Python
app/grafana/dashboard.py
zephyrxvxx7/KubeZephyr-Backend
242410bc236e1f7204c24d635eb3346b0c256dc8
[ "MIT" ]
2
2021-04-25T01:49:45.000Z
2021-11-25T09:10:40.000Z
app/grafana/dashboard.py
zephyrxvxx7/KubeZephyr-Backend
242410bc236e1f7204c24d635eb3346b0c256dc8
[ "MIT" ]
null
null
null
app/grafana/dashboard.py
zephyrxvxx7/KubeZephyr-Backend
242410bc236e1f7204c24d635eb3346b0c256dc8
[ "MIT" ]
null
null
null
import requests from app.models.user import User from app.core.config import GRAFANA_API_KEY, GRAFANA_SERVER headers = {'Authorization': f"Bearer {GRAFANA_API_KEY}", 'Content-Type': 'application/json'} def create_dashboard(json: dict): return requests.post(f"{GRAFANA_SERVER}/api/dashboards/db", json=json, headers=headers) def update_dashboard(json: dict): return requests.post(f"{GRAFANA_SERVER}/api/dashboards/db", json=json, headers=headers) def delete_dashboard_by_uid(uid: str): return requests.delete(f"{GRAFANA_SERVER}/api/dashboards/uid/{uid}", headers=headers) def generate_pod_dashboard_with_alert(user: User, pod_name: str): notifications_channel = namespace = str(user.id) return { 'dashboard': { 'description': 'This dashboard is auto-generated by KubeZephyr. PLEASE DO NOT MODIFY', 'panels': [ { 'collapsed': False, 'datasource': None, 'gridPos': {'h': 1, 'w': 24, 'x': 0, 'y': 0}, 'id': 34, 'panels': [], 'repeat': None, 'title': 'Dashboard', 'type': 'row', }, { 'datasource': 'Prometheus', 'fieldConfig': { 'defaults': { 'color': {'mode': 'thresholds'}, 'mappings': [], 'thresholds': { 'mode': 'absolute', 'steps': [{'color': 'green', 'value': None}], }, }, 'overrides': [ { 'matcher': {'id': 'byName', 'options': 'Failed'}, 'properties': [ { 'id': 'thresholds', 'value': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'semi-dark-red', 'value': 1}, ], }, }, { 'id': 'mappings', 'value': [ { 'from': '', 'id': 1, 'text': 'Failed', 'to': '', 'type': 1, 'value': '1', } ], }, ], }, { 'matcher': {'id': 'byName', 'options': 'Pending'}, 'properties': [ { 'id': 'thresholds', 'value': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'light-orange', 'value': 1}, ], }, }, { 'id': 'mappings', 'value': [ { 'from': '', 'id': 1, 'text': 'Pending', 'to': '', 'type': 1, 'value': '1', } ], }, ], }, { 'matcher': {'id': 'byName', 'options': 'Running'}, 'properties': [ { 'id': 'thresholds', 'value': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'light-green', 'value': 1}, ], }, }, { 'id': 'mappings', 'value': [ { 'from': '', 'id': 1, 'text': 'Running', 'to': '', 'type': 1, 'value': '1', } ], }, ], }, { 'matcher': {'id': 'byName', 'options': 'Succeeded'}, 'properties': [ { 'id': 'thresholds', 'value': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'light-blue', 'value': 1}, ], }, }, { 'id': 'mappings', 'value': [ { 'from': '', 'id': 1, 'text': 'Succeeded', 'to': '', 'type': 1, 'value': '1', } ], }, ], }, { 'matcher': {'id': 'byName', 'options': 'Unknown'}, 'properties': [ { 'id': 'thresholds', 'value': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'rgb(87, 87, 87)', 'value': 1}, ], }, }, { 'id': 'mappings', 'value': [ { 'from': '', 'id': 1, 'text': 'Unknown', 'to': '', 'type': 1, 'value': '1', } ], }, ], }, ], }, 'gridPos': {'h': 5, 'w': 5, 'x': 0, 'y': 1}, 'id': 8, 'links': [], 'options': { 'colorMode': 'value', 'graphMode': 'none', 'justifyMode': 'center', 'orientation': 'auto', 'reduceOptions': { 'calcs': ['lastNotNull'], 'fields': '', 'values': False, }, 'text': {}, 'textMode': 'auto', }, 'pluginVersion': '7.5.5', 'targets': [ { 'exemplar': True, 'expr': f'sum(kube_pod_status_phase{{namespace=~"{namespace}",pod=~"{pod_name}"}} == 1) by(phase)', 'format': 'time_series', 'instant': True, 'interval': '', 'intervalFactor': 1, 'legendFormat': '{{ phase }}', 'refId': 'A', 'step': 2, } ], 'timeFrom': None, 'timeShift': None, 'title': 'Status', 'type': 'stat', }, { 
'alert': { 'alertRuleTags': {}, 'conditions': [ { 'evaluator': {'params': [1], 'type': 'gt'}, 'operator': {'type': 'and'}, 'query': {'params': ['A', '1m', 'now']}, 'reducer': {'params': [], 'type': 'sum'}, 'type': 'query', } ], 'executionErrorState': 'alerting', 'for': '1m', 'frequency': '1m', 'handler': 1, 'message': f'錯誤:您的容器 {pod_name} 偵測到重新啟動,請確認設定是否皆為正確。', 'name': f'{user.realName}-{pod_name}: Container Restart alert', 'noDataState': 'ok', 'notifications': [{'uid': f'{notifications_channel}'}], }, 'aliasColors': {}, 'bars': False, 'dashLength': 10, 'dashes': False, 'datasource': 'Prometheus', 'fieldConfig': {'defaults': {}, 'overrides': []}, 'fill': 1, 'fillGradient': 0, 'gridPos': {'h': 5, 'w': 19, 'x': 5, 'y': 1}, 'hiddenSeries': False, 'id': 12, 'legend': { 'avg': False, 'current': False, 'max': False, 'min': False, 'show': False, 'total': False, 'values': False, }, 'lines': True, 'linewidth': 1, 'links': [], 'nullPointMode': 'null', 'options': {'alertThreshold': True}, 'percentage': False, 'pluginVersion': '7.5.5', 'pointradius': 2, 'points': False, 'renderer': 'flot', 'seriesOverrides': [], 'spaceLength': 10, 'stack': False, 'steppedLine': False, 'targets': [ { 'exemplar': True, 'expr': f'kube_pod_container_status_restarts_total{{namespace=~"{namespace}", pod=~"{pod_name}"}}', 'format': 'time_series', 'hide': False, 'interval': '', 'intervalFactor': 2, 'legendFormat': '{{ container }}', 'refId': 'A', 'step': 2, } ], 'thresholds': [ { 'colorMode': 'critical', 'fill': True, 'line': True, 'op': 'gt', 'value': 1, 'visible': True, } ], 'timeFrom': None, 'timeRegions': [], 'timeShift': None, 'title': 'Container Restart', 'tooltip': {'shared': True, 'sort': 0, 'value_type': 'individual'}, 'type': 'graph', 'xaxis': { 'buckets': None, 'mode': 'time', 'name': None, 'show': True, 'values': [], }, 'yaxes': [ { '$$hashKey': 'object:79', 'decimals': 0, 'format': 'none', 'label': None, 'logBase': 1, 'max': None, 'min': '0', 'show': True, }, { '$$hashKey': 'object:80', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, ], 'yaxis': {'align': False, 'alignLevel': None}, }, { 'collapsed': False, 'datasource': None, 'gridPos': {'h': 1, 'w': 24, 'x': 0, 'y': 6}, 'id': 35, 'panels': [], 'repeat': None, 'title': 'Resource', 'type': 'row', }, { 'cacheTimeout': None, 'datasource': None, 'fieldConfig': { 'defaults': { 'color': {'mode': 'thresholds'}, 'decimals': 2, 'mappings': [ { 'id': 0, 'op': '=', 'text': 'N/A', 'type': 1, 'value': 'null', } ], 'thresholds': { 'mode': 'absolute', 'steps': [ {'color': 'light-green', 'value': None}, {'color': 'light-orange', 'value': 60}, {'color': 'semi-dark-red', 'value': 80}, ], }, 'unit': 'percent', }, 'overrides': [], }, 'gridPos': {'h': 5, 'w': 5, 'x': 0, 'y': 7}, 'id': 4, 'interval': None, 'links': [], 'maxDataPoints': 100, 'options': { 'colorMode': 'value', 'graphMode': 'area', 'justifyMode': 'auto', 'orientation': 'horizontal', 'reduceOptions': { 'calcs': ['lastNotNull'], 'fields': '', 'values': False, }, 'text': {}, 'textMode': 'auto', }, 'pluginVersion': '7.5.5', 'targets': [ { 'exemplar': True, 'expr': f'rate(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{{namespace=~"{namespace}", container!="POD", pod=~"{pod_name}"}}[1m])', 'format': 'time_series', 'instant': False, 'interval': '', 'intervalFactor': 1, 'legendFormat': '', 'refId': 'A', 'step': 20, } ], 'title': 'CPU', 'type': 'stat', }, { 'alert': { 'alertRuleTags': {}, 'conditions': [ { 'evaluator': {'params': [90], 'type': 'gt'}, 'operator': 
{'type': 'and'}, 'query': {'params': ['A', '5m', 'now']}, 'reducer': {'params': [], 'type': 'avg'}, 'type': 'query', } ], 'executionErrorState': 'alerting', 'for': '5m', 'frequency': '1m', 'handler': 1, 'message': f'警告:您的容器 {pod_name} 的CPU使用率已經連續一分鐘超過90%。', 'name': f'{user.realName}-{pod_name}: CPU Usage alert', 'noDataState': 'ok', 'notifications': [{'uid': f'{notifications_channel}'}], }, 'aliasColors': {}, 'bars': False, 'dashLength': 10, 'dashes': False, 'datasource': 'Prometheus', 'editable': True, 'error': False, 'fieldConfig': {'defaults': {'unit': 'percent'}, 'overrides': []}, 'fill': 1, 'fillGradient': 0, 'grid': {}, 'gridPos': {'h': 5, 'w': 9, 'x': 5, 'y': 7}, 'hiddenSeries': False, 'id': 40, 'legend': { 'alignAsTable': True, 'avg': True, 'current': True, 'max': False, 'min': False, 'rightSide': True, 'show': False, 'total': False, 'values': True, }, 'lines': True, 'linewidth': 2, 'links': [], 'nullPointMode': 'connected', 'options': {'alertThreshold': True}, 'paceLength': 10, 'percentage': False, 'pluginVersion': '7.5.5', 'pointradius': 5, 'points': False, 'renderer': 'flot', 'seriesOverrides': [], 'spaceLength': 10, 'stack': False, 'steppedLine': False, 'targets': [ { 'exemplar': True, 'expr': f'rate(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{{namespace=~"{namespace}", container!="POD", pod=~"{pod_name}"}}[1m])', 'format': 'time_series', 'instant': False, 'interval': '', 'intervalFactor': 1, 'legendFormat': 'Current', 'refId': 'A', 'step': 2, } ], 'thresholds': [ { 'colorMode': 'critical', 'fill': True, 'line': True, 'op': 'gt', 'value': 90, 'visible': True, } ], 'timeFrom': None, 'timeRegions': [], 'timeShift': None, 'title': 'CPU Usage', 'tooltip': { 'msResolution': True, 'shared': True, 'sort': 0, 'value_type': 'cumulative', }, 'type': 'graph', 'xaxis': { 'buckets': None, 'mode': 'time', 'name': None, 'show': True, 'values': [], }, 'yaxes': [ { '$$hashKey': 'object:1172', 'format': 'percent', 'label': None, 'logBase': 1, 'max': '100', 'min': '0', 'show': True, }, { '$$hashKey': 'object:1173', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, ], 'yaxis': {'align': False, 'alignLevel': None}, }, { 'aliasColors': {}, 'bars': False, 'dashLength': 10, 'dashes': False, 'datasource': 'Prometheus', 'editable': True, 'error': False, 'fieldConfig': {'defaults': {}, 'overrides': []}, 'fill': 1, 'fillGradient': 0, 'grid': {}, 'gridPos': {'h': 5, 'w': 10, 'x': 14, 'y': 7}, 'hiddenSeries': False, 'id': 2, 'legend': { 'alignAsTable': True, 'avg': True, 'current': True, 'max': False, 'min': False, 'rightSide': True, 'show': False, 'total': False, 'values': True, }, 'lines': True, 'linewidth': 2, 'links': [], 'nullPointMode': 'connected', 'options': {'alertThreshold': True}, 'paceLength': 10, 'percentage': False, 'pluginVersion': '7.5.5', 'pointradius': 5, 'points': False, 'renderer': 'flot', 'seriesOverrides': [], 'spaceLength': 10, 'stack': False, 'steppedLine': False, 'targets': [ { 'exemplar': True, 'expr': f'sum by (container)(rate(container_cpu_usage_seconds_total{{namespace=~"{namespace}", pod=~"{pod_name}", container!="", container!="POD", image!=""}}[2m]))', 'format': 'time_series', 'instant': False, 'interval': '', 'intervalFactor': 1, 'legendFormat': 'Current', 'refId': 'A', 'step': 2, }, { 'exemplar': True, 'expr': f'kube_pod_container_resource_limits_cpu_cores{{namespace=~"{namespace}", pod=~"{pod_name}", container!="POD"}}', 'format': 'time_series', 'interval': '', 'intervalFactor': 1, 'legendFormat': 
'Limit', 'refId': 'C', 'step': 2, }, ], 'thresholds': [], 'timeFrom': None, 'timeRegions': [], 'timeShift': None, 'title': 'CPU Usage', 'tooltip': { 'msResolution': True, 'shared': True, 'sort': 0, 'value_type': 'cumulative', }, 'type': 'graph', 'xaxis': { 'buckets': None, 'mode': 'time', 'name': None, 'show': True, 'values': [], }, 'yaxes': [ { '$$hashKey': 'object:1092', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, { '$$hashKey': 'object:1093', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, ], 'yaxis': {'align': False, 'alignLevel': None}, }, { 'cacheTimeout': None, 'datasource': 'Prometheus', 'fieldConfig': { 'defaults': { 'color': {'fixedColor': 'rgb(87, 87, 87)', 'mode': 'fixed'}, 'mappings': [ { 'id': 0, 'op': '=', 'text': 'N/A', 'type': 1, 'value': 'null', } ], 'thresholds': { 'mode': 'absolute', 'steps': [{'color': 'green', 'value': None}], }, 'unit': 'bytes', }, 'overrides': [], }, 'gridPos': {'h': 5, 'w': 5, 'x': 0, 'y': 12}, 'id': 5, 'interval': None, 'links': [], 'maxDataPoints': 100, 'options': { 'colorMode': 'value', 'graphMode': 'area', 'justifyMode': 'auto', 'orientation': 'horizontal', 'reduceOptions': { 'calcs': ['lastNotNull'], 'fields': '', 'values': False, }, 'text': {}, 'textMode': 'auto', }, 'pluginVersion': '7.5.5', 'targets': [ { 'exemplar': True, 'expr': f'sum(container_memory_working_set_bytes{{namespace=~"{namespace}", pod=~"{pod_name}", container!="POD", container!=""}})', 'format': 'time_series', 'hide': False, 'interval': '', 'intervalFactor': 1, 'legendFormat': '', 'refId': 'A', 'step': 20, } ], 'title': 'Memory', 'type': 'stat', }, { 'alert': { 'alertRuleTags': {}, 'conditions': [ { 'evaluator': {'params': [90], 'type': 'gt'}, 'operator': {'type': 'and'}, 'query': {'params': ['A', '1m', 'now']}, 'reducer': {'params': [], 'type': 'max'}, 'type': 'query', } ], 'executionErrorState': 'alerting', 'for': '1m', 'frequency': '1m', 'handler': 1, 'message': f'警告:您的容器 {pod_name} 偵測到記憶體利用率超過90%。', 'name': f'{user.realName}-{pod_name}: Memory Usage alert', 'noDataState': 'ok', 'notifications': [{'uid': f'{notifications_channel}'}], }, 'aliasColors': {}, 'bars': False, 'dashLength': 10, 'dashes': False, 'datasource': 'Prometheus', 'editable': False, 'error': False, 'fieldConfig': {'defaults': {}, 'overrides': []}, 'fill': 1, 'fillGradient': 0, 'grid': {}, 'gridPos': {'h': 5, 'w': 9, 'x': 5, 'y': 12}, 'hiddenSeries': False, 'id': 41, 'legend': { 'alignAsTable': True, 'avg': False, 'current': False, 'max': False, 'min': False, 'rightSide': True, 'show': False, 'total': False, 'values': False, }, 'lines': True, 'linewidth': 2, 'links': [], 'nullPointMode': 'connected', 'options': {'alertThreshold': True}, 'paceLength': 10, 'percentage': False, 'pluginVersion': '7.5.5', 'pointradius': 5, 'points': False, 'renderer': 'flot', 'seriesOverrides': [], 'spaceLength': 10, 'stack': False, 'steppedLine': False, 'targets': [ { 'exemplar': True, 'expr': f'sum(container_memory_working_set_bytes{{namespace=~"{namespace}", pod=~"{pod_name}", container!="POD", container!="", image!=""}}) / sum(kube_pod_container_resource_limits_memory_bytes{{namespace=~"{namespace}", pod=~"{pod_name}"}}) * 100', 'format': 'time_series', 'interval': '10s', 'intervalFactor': 1, 'legendFormat': 'Current', 'metric': 'container_memory_usage_bytes', 'refId': 'A', 'step': 10, } ], 'thresholds': [ { 'colorMode': 'critical', 'fill': True, 'line': True, 'op': 'gt', 'value': 90, 'visible': True, } ], 'timeFrom': None, 
'timeRegions': [], 'timeShift': None, 'title': 'Memory Usage', 'tooltip': { 'msResolution': True, 'shared': True, 'sort': 0, 'value_type': 'cumulative', }, 'type': 'graph', 'xaxis': { 'buckets': None, 'mode': 'time', 'name': None, 'show': True, 'values': [], }, 'yaxes': [ { '$$hashKey': 'object:818', 'format': 'percent', 'label': None, 'logBase': 1, 'max': '100', 'min': '0', 'show': True, }, { '$$hashKey': 'object:819', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, ], 'yaxis': {'align': False, 'alignLevel': None}, }, { 'aliasColors': {}, 'bars': False, 'dashLength': 10, 'dashes': False, 'datasource': 'Prometheus', 'editable': False, 'error': False, 'fieldConfig': {'defaults': {}, 'overrides': []}, 'fill': 1, 'fillGradient': 0, 'grid': {}, 'gridPos': {'h': 5, 'w': 10, 'x': 14, 'y': 12}, 'hiddenSeries': False, 'id': 1, 'legend': { 'alignAsTable': True, 'avg': False, 'current': False, 'max': False, 'min': False, 'rightSide': True, 'show': False, 'total': False, 'values': False, }, 'lines': True, 'linewidth': 2, 'links': [], 'nullPointMode': 'connected', 'options': {'alertThreshold': True}, 'paceLength': 10, 'percentage': False, 'pluginVersion': '7.5.5', 'pointradius': 5, 'points': False, 'renderer': 'flot', 'seriesOverrides': [], 'spaceLength': 10, 'stack': False, 'steppedLine': False, 'targets': [ { 'exemplar': True, 'expr': f'container_memory_working_set_bytes{{namespace=~"{namespace}", pod=~"{pod_name}", container!="POD", container!="", image!=""}}', 'format': 'time_series', 'interval': '10s', 'intervalFactor': 1, 'legendFormat': 'Current', 'metric': 'container_memory_usage_bytes', 'refId': 'A', 'step': 10, }, { 'exemplar': True, 'expr': f'kube_pod_container_resource_limits_memory_bytes{{namespace=~"{namespace}", pod=~"{pod_name}"}}', 'format': 'time_series', 'hide': False, 'interval': '10s', 'intervalFactor': 2, 'legendFormat': 'Limit', 'metric': 'kube_pod_container_resource_limits_memory_bytes', 'refId': 'C', 'step': 20, }, ], 'thresholds': [], 'timeFrom': None, 'timeRegions': [], 'timeShift': None, 'title': 'Memory Usage', 'tooltip': { 'msResolution': True, 'shared': True, 'sort': 0, 'value_type': 'cumulative', }, 'type': 'graph', 'xaxis': { 'buckets': None, 'mode': 'time', 'name': None, 'show': True, 'values': [], }, 'yaxes': [ { '$$hashKey': 'object:818', 'format': 'bytes', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, { '$$hashKey': 'object:819', 'format': 'short', 'label': None, 'logBase': 1, 'max': None, 'min': None, 'show': True, }, ], 'yaxis': {'align': False, 'alignLevel': None}, }, { 'cacheTimeout': None, 'datasource': 'Prometheus', 'fieldConfig': { 'defaults': { 'color': {'fixedColor': 'rgb(87, 87, 87)', 'mode': 'fixed'}, 'decimals': 2, 'mappings': [ { 'id': 0, 'op': '=', 'text': 'N/A', 'type': 1, 'value': 'null', } ], 'thresholds': { 'mode': 'absolute', 'steps': [{'color': 'green', 'value': None}], }, 'unit': 'Bps', }, 'overrides': [], }, 'gridPos': {'h': 5, 'w': 5, 'x': 0, 'y': 17}, 'id': 7, 'interval': None, 'links': [], 'maxDataPoints': 100, 'options': { 'colorMode': 'value', 'graphMode': 'area', 'justifyMode': 'auto', 'orientation': 'horizontal', 'reduceOptions': { 'calcs': ['lastNotNull'], 'fields': '', 'values': False, }, 'text': {}, 'textMode': 'auto', }, 'pluginVersion': '7.5.5', 'targets': [ { 'exemplar': True, 'expr': f'sum(rate(container_network_transmit_bytes_total{{namespace=~"{namespace}",pod=~"{pod_name}"}}[5m])) + 
sum(rate(container_network_receive_bytes_total{{namespace=~"{namespace}",pod=~"{pod_name}"}}[5m])) ', 'format': 'time_series', 'interval': '', 'intervalFactor': 2, 'legendFormat': '', 'refId': 'A', 'step': 20, } ], 'title': 'Network RX/TX Total (bytes/sec)', 'type': 'stat', }, { 'datasource': 'Prometheus', 'fieldConfig': { 'defaults': { 'color': {'mode': 'palette-classic'}, 'custom': { 'axisLabel': '', 'axisPlacement': 'auto', 'barAlignment': 0, 'drawStyle': 'line', 'fillOpacity': 10, 'gradientMode': 'none', 'hideFrom': { 'graph': False, 'legend': False, 'tooltip': False, }, 'lineInterpolation': 'linear', 'lineWidth': 2, 'pointSize': 5, 'scaleDistribution': {'type': 'linear'}, 'showPoints': 'never', 'spanNulls': False, }, 'mappings': [], 'thresholds': { 'mode': 'absolute', 'steps': [ {'color': 'green', 'value': None}, {'color': 'red', 'value': 80}, ], }, 'unit': 'Bps', }, 'overrides': [], }, 'gridPos': {'h': 5, 'w': 19, 'x': 5, 'y': 17}, 'id': 3, 'links': [], 'options': { 'graph': {}, 'legend': { 'calcs': ['mean', 'lastNotNull', 'max', 'min', 'sum'], 'displayMode': 'hidden', 'placement': 'right', }, 'tooltipOptions': {'mode': 'single'}, }, 'pluginVersion': '7.5.5', 'targets': [ { 'exemplar': True, 'expr': f'rate(container_network_transmit_bytes_total{{namespace=~"{namespace}", pod=~"{pod_name}",interface=~"eth0|ens.*"}}[5m])', 'format': 'time_series', 'interval': '', 'intervalFactor': 2, 'legendFormat': 'TX', 'refId': 'A', 'step': 2, }, { 'exemplar': True, 'expr': f'rate(container_network_receive_bytes_total{{namespace=~"{namespace}", pod=~"{pod_name}",interface=~"eth0|ens.*"}}[5m])', 'format': 'time_series', 'interval': '', 'intervalFactor': 2, 'legendFormat': 'RX', 'refId': 'B', 'step': 2, }, ], 'timeFrom': None, 'timeShift': None, 'title': 'POD Network', 'type': 'timeseries', }, { 'collapsed': False, 'datasource': None, 'gridPos': {'h': 1, 'w': 24, 'x': 0, 'y': 22}, 'id': 37, 'panels': [], 'title': 'Alerts', 'type': 'row', }, { 'dashboardFilter': '', 'dashboardTags': [], 'datasource': None, 'fieldConfig': {'defaults': {}, 'overrides': []}, 'folderId': None, 'gridPos': {'h': 6, 'w': 24, 'x': 0, 'y': 23}, 'id': 39, 'limit': 10, 'nameFilter': '', 'onlyAlertsOnDashboard': True, 'pluginVersion': '7.5.5', 'repeat': None, 'show': 'current', 'sortOrder': 1, 'stateFilter': [ 'ok', 'paused', 'no_data', 'execution_error', 'alerting', 'pending', ], 'timeFrom': None, 'timeShift': None, 'title': 'Alerts', 'type': 'alertlist', }, ], 'refresh': '10s', 'tags': ['kubernetes', 'pods', 'KubeZephyr'], 'timezone': 'browser', 'title': f'{user.realName}-{pod_name}: POD Overview with alerts', 'uid': f'{namespace}-{pod_name}', }, 'folderId': 32 }
43.92148
278
0.246974
2,250
48,665
5.274667
0.151556
0.014156
0.012639
0.018622
0.817577
0.784968
0.761544
0.723879
0.705342
0.655713
0
0.021194
0.629631
48,665
1,107
279
43.961156
0.637261
0
0
0.683636
0
0.004545
0.224227
0.035898
0
0
0
0
0
1
0.003636
false
0
0.002727
0.002727
0.01
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
631e26d219e1c326e87a4ee652a2fb048758abc4
40,383
py
Python
packages/winpanda/extra/src/winpanda/core/command.py
pierrebeitz/dcos
424a390624f9a97b84da25ca03caf94707e13e01
[ "Apache-2.0" ]
null
null
null
packages/winpanda/extra/src/winpanda/core/command.py
pierrebeitz/dcos
424a390624f9a97b84da25ca03caf94707e13e01
[ "Apache-2.0" ]
null
null
null
packages/winpanda/extra/src/winpanda/core/command.py
pierrebeitz/dcos
424a390624f9a97b84da25ca03caf94707e13e01
[ "Apache-2.0" ]
null
null
null
"""Panda package management for Windows. DC/OS package management command definitions. """ import abc import os from pathlib import Path import shutil import yaml from cfgm import exceptions as cfgm_exc from common import exceptions as cm_exc from common import logger from common import storage from common import utils as cm_utl from common.cli import CLI_COMMAND, CLI_CMDTARGET, CLI_CMDOPT from common.storage import ISTOR_NODE from core import cmdconf from core import exceptions as cr_exc from core.istate import ISTATE from core.package.id import PackageId from core.package.manifest import PackageManifest from core.package.package import Package from core import utils as cr_utl from extm import exceptions as extm_exc from svcm import exceptions as svcm_exc from svcm.base import WindowsServiceManager from svcm.nssm import SVC_STATUS LOG = logger.get_logger(__name__) CMD_TYPES = {} def create(**cmd_opts): """Instantiate a command. :param cmd_opts: dict, command options: { 'command_name': <str>, ... } """ command_name = cmd_opts.get(CLI_CMDOPT.CMD_NAME, '') return CMD_TYPES[command_name](**cmd_opts) def command_type(command_name: str): """Register a command class in the command types registry. :param command_name: str, name of a command """ def decorator(cls): """""" CMD_TYPES[command_name] = cls return cls return decorator class Command(metaclass=abc.ABCMeta): """Abstract base class for command types. """ def __init__(self, **cmd_opts): """Constructor.""" self.msg_src = self.__class__.__name__ self.cmd_opts = cmd_opts def __repr__(self): return ( '<%s(cmd_opts="%s")>' % (self.__class__.__name__, self.cmd_opts) ) def __str__(self): return self.__repr__() @abc.abstractmethod def verify_cmd_options(self, *args, **kwargs): """Verify command options.""" pass @abc.abstractmethod def execute(self, *args, **kwargs): """Execute command.""" pass @command_type(CLI_COMMAND.SETUP) class CmdSetup(Command): """Setup command implementation.""" def __init__(self, **cmd_opts): """""" super(CmdSetup, self).__init__(**cmd_opts) if self.cmd_opts.get(CLI_CMDOPT.CMD_TARGET) == CLI_CMDTARGET.STORAGE: # Deactivate cluster-related configuration steps self.cmd_opts[CLI_CMDOPT.DCOS_CLUSTERCFGPATH] = 'NOP' self.config = cmdconf.create(**self.cmd_opts) LOG.debug(f'{self.msg_src}: cmd_opts: {self.cmd_opts}') def verify_cmd_options(self): """Verify command options.""" pass def execute(self): """Execute command.""" LOG.debug(f'{self.msg_src}: Execute: Target:' f' {self.cmd_opts.get(CLI_CMDOPT.CMD_TARGET)}') try: cmd_target = self.cmd_opts.get(CLI_CMDOPT.CMD_TARGET) if cmd_target == CLI_CMDTARGET.STORAGE: # (Re)build/repair the installation storage structure. self.config.inst_storage.construct( clean=self.cmd_opts.get(CLI_CMDOPT.INST_CLEAN) ) elif cmd_target == CLI_CMDTARGET.PKGALL: istate = self.config.inst_state.istate if istate == ISTATE.INSTALLATION_IN_PROGRESS: self._handle_cmdtarget_pkgall() self._register_istate(ISTATE.INSTALLED) else: LOG.info( f'{self.msg_src}: Execute: Invalid DC/OS installation' f' state detected: {istate}: NOP' ) LOG.info(f'{self.msg_src}: Execute: OK') except cm_exc.WinpandaError: self._register_istate(ISTATE.INSTALLATION_FAILED) raise def _register_istate(self, inst_state): """""" # TODO: Move this method to the abstract base parent class Command to # avoid code duplication in command manager classes. 
msg_base = (f'{self.msg_src}:' f' Execute: Register installation state: {inst_state}') try: self.config.inst_state.istate = inst_state LOG.debug(f'{msg_base}: OK') except cr_exc.RCError as e: raise cr_exc.SetupCommandError(f'Execute: {type(e).__name__}: {e}') def _handle_cmdtarget_pkgall(self): """""" # TODO: This code is duplicated in the CmdUpgrade._handle_clean_setup() # stuff and so should be made standalone to be reused in both # classes avoiding massive code duplication. dstor_root_url = self.config.cluster_conf.get( 'distribution-storage', {} ).get('rooturl', '') dstor_pkgrepo_path = self.config.cluster_conf.get( 'distribution-storage', {} ).get('pkgrepopath', '') # Add packages to the local package repository and initialize their # manager objects packages_bulk = {} for item in self.config.ref_pkg_list: pkg_id = PackageId(pkg_id=item) try: self.config.inst_storage.add_package( pkg_id=pkg_id, dstor_root_url=dstor_root_url, dstor_pkgrepo_path=dstor_pkgrepo_path ) except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Add package to local' f' repository: {pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) from e try: package = Package( pkg_id=pkg_id, istor_nodes=self.config.inst_storage.istor_nodes, cluster_conf=self.config.cluster_conf, extra_context=self.config.dcos_conf.get('values') ) except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Initialize package:' f' {pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) from e packages_bulk[pkg_id.pkg_name] = package # Finalize package setup procedures taking package mutual # dependencies into account. packages_sorted_by_deps = cr_utl.pkg_sort_by_deps(packages_bulk) # Prepare base per package configuration objects for package in packages_sorted_by_deps: # TODO: This method moves parts of individual packages which should # be shared with other packages to DC/OS installation shared # directories (<inst_root>\[bin|etc|lib]). It should be # redesigned to deal with only required parts of packages and # not populating shared DC/OS installation directories with # unnecessary stuff. self._handle_pkg_dir_setup(package) # TODO: This should be replaced with Package.handle_config_setup() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_cfg_setup(package) # Deploy DC/OS aggregated configuration object self._deploy_dcos_conf() # Run per package extra installation helpers, setup services and # save manifests for package in packages_sorted_by_deps: # TODO: This should be replaced with Package.handle_inst_extras() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_inst_extras(package) # TODO: This should be replaced with Package.handle_svc_setup() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_svc_setup(package) # TODO: This part should be replaced with Package.save_manifest() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade try: package.manifest.save() except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Register package:' f' {package.manifest.pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) LOG.info(f'{self.msg_src}: Setup package:' f' {package.manifest.pkg_id.pkg_id}: OK') def _handle_pkg_dir_setup(self, package: Package): """Transfer files from special directories into location. 
:param package: Package, DC/OS package manager object """ # TODO: Move this functionality to a method of the Package class and # reuse it in CmdSetup and CmdUpgrade classes to avoid code # duplication. pkg_path = getattr( package.manifest.istor_nodes, ISTOR_NODE.PKGREPO ).joinpath(package.manifest.pkg_id.pkg_id) root = getattr( package.manifest.istor_nodes, ISTOR_NODE.ROOT ) for name in ('bin', 'etc', 'include', 'lib'): srcdir = pkg_path / name if srcdir.exists(): dstdir = root / name dstdir.mkdir(exist_ok=True) cm_utl.transfer_files(str(srcdir), str(dstdir)) def _handle_pkg_cfg_setup(self, package: Package): """Execute steps on package configuration files setup. :param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_config_setup() pkg_id = package.manifest.pkg_id LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: ...') try: package.cfg_manager.setup_conf() except cfgm_exc.PkgConfNotFoundError as e: LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: NOP') except cfgm_exc.PkgConfManagerError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup configuration:' f'{type(e).__name__}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: OK') def _handle_pkg_inst_extras(self, package: Package): """Process package extra installation options. :param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_inst_extras() msg_src = self.__class__.__name__ pkg_id = package.manifest.pkg_id if package.ext_manager: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: ...') try: package.ext_manager.handle_install_extras() except extm_exc.InstExtrasManagerError as e: err_msg = (f'Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: {e}') raise cr_exc.SetupCommandError(err_msg) from e LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: OK') else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: NOP') def _handle_pkg_svc_setup(self, package: Package): """Execute steps on package service setup. :param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_svc_setup() msg_src = self.__class__.__name__ pkg_id = package.manifest.pkg_id if package.svc_manager: svc_name = package.svc_manager.svc_name LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: ...') try: ret_code, stdout, stderr = package.svc_manager.status() except svcm_exc.ServiceManagerCommandError as e: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' service: Get initial service status: {svc_name}:' f' {e}') # Try to setup, as a service (expectedly) doesn't exist and # checking it's status naturally would yield an error. 
try: package.svc_manager.setup() except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' service: Get initial service status: {svc_name}:' f' stdout[{stdout}] stderr[{stderr}]') svc_status = str(stdout).strip().rstrip('\n') # Try to remove existing service try: if svc_status == SVC_STATUS.RUNNING: package.svc_manager.stop() package.svc_manager.remove() LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Remove' f' existing service: {svc_name}: OK') except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Remove existing' f' service: {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e # Setup a replacement service try: package.svc_manager.setup() ret_code, stdout, stderr = (package.svc_manager.status()) svc_status = str(stdout).strip().rstrip('\n') except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup replacement' f' service: {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: if svc_status != SVC_STATUS.STOPPED: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup' f' replacement service: {svc_name}:' f' Invalid status: {svc_status}') raise cr_exc.SetupCommandError(err_msg) LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: OK') else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' NOP') def _deploy_dcos_conf(self): """Deploy aggregated DC/OS configuration object.""" # TODO: This should be made standalone and then reused in command # manager classes CmdSetup and CmdUpgrade to avoid code # duplication LOG.debug(f'{self.msg_src}: Execute: Deploy aggregated config: ...') template = self.config.dcos_conf.get('template') values = self.config.dcos_conf.get('values') rendered = template.render(values) config = yaml.safe_load(rendered) assert config.keys() == {"package"} # Write out the individual files for file_info in config["package"]: assert file_info.keys() <= {"path", "content", "permissions"} path = Path(file_info['path'].replace('\\', os.path.sep)) path.parent.mkdir(parents=True, exist_ok=True) path.write_text(file_info['content'] or '') # On Windows, we don't interpret permissions yet LOG.debug(f'{self.msg_src}: Execute: Deploy aggregated config: OK') @command_type(CLI_COMMAND.UPGRADE) class CmdUpgrade(Command): """Implementation of the Upgrade command manager.""" def __init__(self, **cmd_opts): """""" super(CmdUpgrade, self).__init__(**cmd_opts) self.config = cmdconf.create(**self.cmd_opts) LOG.debug(f'{self.msg_src}: cmd_opts: {self.cmd_opts}') def verify_cmd_options(self): """Verify command options.""" pass def execute(self): """Execute command.""" LOG.debug(f'{self.msg_src}: Execute ...') try: istate = self.config.inst_state.istate if istate == ISTATE.UPGRADE_IN_PROGRESS: self._handle_upgrade() self._register_istate(ISTATE.INSTALLED) else: LOG.info( f'{self.msg_src}: Execute: Invalid DC/OS installation' f' state detected: {istate}: NOP' ) LOG.info(f'{self.msg_src}: Execute: OK') except cm_exc.WinpandaError: self._register_istate(ISTATE.UPGRADE_FAILED) raise def _register_istate(self, inst_state): """""" # TODO: Move this method to the abstract base parent class Command to # avoid code duplication in command manager classes. 
msg_base = (f'{self.msg_src}:' f' Execute: Register installation state: {inst_state}') try: self.config.inst_state.istate = inst_state LOG.debug(f'{msg_base}: OK') except cr_exc.RCError as e: raise cr_exc.SetupCommandError(f'Execute: {type(e).__name__}: {e}') def _handle_upgrade(self): """""" self._handle_upgrade_pre() self._handle_teardown() self._handle_teardown_post() self._handle_clean_setup() def _handle_upgrade_pre(self): """""" mheading = f'{self.msg_src}: Execute' # TODO: Add all the upgrade preparation steps (package download, # TODO: rendering configs, etc.) here. I.e. everything that can be # TODO: done without affecting the currently running system. def _handle_teardown(self): """Teardown the currently installed DC/OS.""" mheading = f'{self.msg_src}: Execute' pkg_manifests = ( self.config.inst_storage.get_pkgactive(PackageManifest.load) ) packages_bulk = { m.pkg_id.pkg_name: Package(manifest=m) for m in pkg_manifests } iroot_dpath = self.config.inst_storage.root_dpath itmp_dpath = self.config.inst_storage.tmp_dpath pkgactive_old_dpath = itmp_dpath.joinpath( f'{storage.DCOS_PKGACTIVE_DPATH_DFT}.old' ) sh_conf_dname = storage.DCOS_INST_CFG_DPATH_DFT sh_exec_dname = storage.DCOS_INST_BIN_DPATH_DFT sh_lib__dname = storage.DCOS_INST_LIB_DPATH_DFT # Teardown installed packages for package in cr_utl.pkg_sort_by_deps(packages_bulk): package.handle_svc_wipe(mheading) package.handle_uninst_extras(mheading) package.handle_vardata_wipe(mheading) package.save_manifest(mheading, pkgactive_old_dpath) package.delete_manifest(mheading) # Remove/preserve shared directories for dname in sh_conf_dname, sh_exec_dname, sh_lib__dname: active_dpath = iroot_dpath.joinpath(dname) preserve_dpath = itmp_dpath.joinpath(f'{dname}.old') try: active_dpath.rename(preserve_dpath) except (OSError, RuntimeError) as e: err_msg = (f'{mheading}: Preserve shared directory:' f' {active_dpath}: {type(e).__name__}: {e}') raise cr_exc.RCError(err_msg) from e LOG.debug(f'{mheading}: Preserve hared directory: {active_dpath}:' f' {preserve_dpath}') def _handle_teardown_post(self): """Perform extra steps on cleaning up unplanned (diverging from initial winpanda design and so, not removed by normal teardown procedure) DC/OS installation leftovers (see the CmdSetup._handle_pkg_dir_setup() and workaround for dcos-diagnostics part in the InstallationStorage.add_package()). """ mheading = f'{self.msg_src}: Execute' LOG.debug(f'{mheading}: After steps: ...') iroot_dpath = self.config.inst_storage.root_dpath ivar_dpath = self.config.inst_storage.var_dpath itmp_dpath = self.config.inst_storage.tmp_dpath wipe_dirs = [ iroot_dpath.joinpath('include'), iroot_dpath.joinpath('mesos-logs'), ivar_dpath.joinpath('lib'), ] for dpath in wipe_dirs: try: cm_utl.rmdir(str(dpath), recursive=True) LOG.debug(f'{mheading}: After steps: Remove dir: {dpath}: OK') except (OSError, RuntimeError) as e: LOG.warning(f'{mheading}: After steps: Remove dir: {dpath}:' f' {type(e).__name__}: {e}') wipe_files = [ iroot_dpath.joinpath('dcos-diagnostics.exe'), iroot_dpath.joinpath('servicelist.txt'), ] for fpath in wipe_files: try: fpath.unlink() LOG.debug(f'{mheading}: After steps: Remove file: {fpath}: OK') except (OSError, RuntimeError) as e: LOG.warning(f'{mheading}: After steps: Remove file: {fpath}:' f' {type(e).__name__}: {e}') # Restoreobjects created/populated by entities/processes outside # of winpanda routines, but required for winpanda to do it's stuff. 
restore_dirs = [ iroot_dpath.joinpath('bin'), iroot_dpath.joinpath('etc'), ] for dpath in restore_dirs: try: dpath.mkdir(parents=True, exist_ok=True) LOG.debug(f'{mheading}: After steps: Restore dir: {dpath}: OK') except (OSError, RuntimeError) as e: LOG.warning(f'{mheading}: After steps: Restore dir: {dpath}:' f' {type(e).__name__}: {e}') restore_files = [ (itmp_dpath.joinpath('bin.old', 'detect_ip.ps1'), iroot_dpath.joinpath('bin')), (itmp_dpath.joinpath('bin.old', 'detect_ip_public.ps1'), iroot_dpath.joinpath('bin')), (itmp_dpath.joinpath('bin.old', 'fault-domain-detect-win.ps1'), iroot_dpath.joinpath('bin')), (itmp_dpath.joinpath('etc.old', 'cluster.conf'), iroot_dpath.joinpath('etc')), (itmp_dpath.joinpath('etc.old', 'paths.json'), iroot_dpath.joinpath('etc')), ] for fspec in restore_files: try: shutil.copy(str(fspec[0]), str(fspec[1]), follow_symlinks=False) LOG.debug(f'{mheading}: After steps: Restore file: {fspec}: OK') except (OSError, RuntimeError) as e: LOG.warning(f'{mheading}: After steps: Restore file: {fspec}:' f' {type(e).__name__}: {e}') LOG.debug(f'{mheading}: After steps: OK') def _handle_clean_setup(self): """Perform all the steps on DC/OS installation remaining after the preparation stage is done (the CmdUpgrade._handle_upgrade_pre()). """ # TODO: This code duplicates the CmdSetup._handle_cmdtarget_pkgall() # stuff and so should be made standalone to be reused in both # classes avoiding massive code duplication. mheading = f'{self.msg_src}: Execute' dstor_root_url = self.config.cluster_conf.get( 'distribution-storage', {} ).get('rooturl', '') dstor_pkgrepo_path = self.config.cluster_conf.get( 'distribution-storage', {} ).get('pkgrepopath', '') # Add packages to the local package repository and initialize their # manager objects packages_bulk = {} for item in self.config.ref_pkg_list: pkg_id = PackageId(pkg_id=item) try: self.config.inst_storage.add_package( pkg_id=pkg_id, dstor_root_url=dstor_root_url, dstor_pkgrepo_path=dstor_pkgrepo_path ) except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Add package to local' f' repository: {pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) from e try: package = Package( pkg_id=pkg_id, istor_nodes=self.config.inst_storage.istor_nodes, cluster_conf=self.config.cluster_conf, extra_context=self.config.dcos_conf.get('values') ) except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Initialize package:' f' {pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) from e packages_bulk[pkg_id.pkg_name] = package # Finalize package setup procedures taking package mutual # dependencies into account. packages_sorted_by_deps = cr_utl.pkg_sort_by_deps(packages_bulk) # Prepare base per package configuration objects for package in packages_sorted_by_deps: # TODO: This method moves parts of individual packages which should # be shared with other packages to DC/OS installation shared # directories (<inst_root>\[bin|etc|lib]). It should be # redesigned to deal with only required parts of packages and # not populating shared DC/OS installation directories with # unnecessary stuff. 
self._handle_pkg_dir_setup(package) # TODO: This should be replaced with Package.handle_config_setup() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_cfg_setup(package) # Deploy DC/OS aggregated configuration object self._deploy_dcos_conf() # Run per package extra installation helpers, setup services and # save manifests for package in packages_sorted_by_deps: # TODO: This should be replaced with Package.handle_inst_extras() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_inst_extras(package) # TODO: This should be replaced with Package.handle_svc_setup() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade self._handle_pkg_svc_setup(package) # TODO: This part should be replaced with Package.save_manifest() # method to avoid code duplication in command manager classes # CmdSetup and CmdUpgrade try: package.manifest.save() except cr_exc.RCError as e: err_msg = (f'{self.msg_src}: Execute: Register package:' f' {package.manifest.pkg_id.pkg_id}: {e}') raise cr_exc.SetupCommandError(err_msg) LOG.info(f'{self.msg_src}: Setup package:' f' {package.manifest.pkg_id.pkg_id}: OK') def _handle_pkg_dir_setup(self, package: Package): """Transfer files from special directories into location. :param package: Package, DC/OS package manager object """ # TODO: Move this functionality to a method of the Package class and # reuse it in CmdSetup and CmdUpgrade classes to avoid code # duplication. pkg_path = getattr( package.manifest.istor_nodes, ISTOR_NODE.PKGREPO ).joinpath(package.manifest.pkg_id.pkg_id) root = getattr( package.manifest.istor_nodes, ISTOR_NODE.ROOT ) for name in ('bin', 'etc', 'include', 'lib'): srcdir = pkg_path / name if srcdir.exists(): dstdir = root / name dstdir.mkdir(exist_ok=True) cm_utl.transfer_files(str(srcdir), str(dstdir)) def _handle_pkg_cfg_setup(self, package: Package): """Execute steps on package configuration files setup. :param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_config_setup() pkg_id = package.manifest.pkg_id LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: ...') try: package.cfg_manager.setup_conf() except cfgm_exc.PkgConfNotFoundError as e: LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: NOP') except cfgm_exc.PkgConfManagerError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup configuration:' f'{type(e).__name__}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: LOG.debug(f'{self.msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' configuration: OK') def _handle_pkg_inst_extras(self, package: Package): """Process package extra installation options. 
:param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_inst_extras() msg_src = self.__class__.__name__ pkg_id = package.manifest.pkg_id if package.ext_manager: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: ...') try: package.ext_manager.handle_install_extras() except extm_exc.InstExtrasManagerError as e: err_msg = (f'Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: {e}') raise cr_exc.SetupCommandError(err_msg) from e LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: OK') else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}:' f' Handle extra installation options: NOP') def _handle_pkg_svc_setup(self, package: Package): """Execute steps on package service setup. :param package: Package, DC/OS package manager object """ # TODO: This method should be removed after transition to use of # Package.handle_svc_setup() msg_src = self.__class__.__name__ pkg_id = package.manifest.pkg_id if package.svc_manager: svc_name = package.svc_manager.svc_name LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: ...') try: ret_code, stdout, stderr = package.svc_manager.status() except svcm_exc.ServiceManagerCommandError as e: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' service: Get initial service status: {svc_name}:' f' {e}') # Try to setup, as a service (expectedly) doesn't exist and # checking it's status naturally would yield an error. try: package.svc_manager.setup() except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup' f' service: Get initial service status: {svc_name}:' f' stdout[{stdout}] stderr[{stderr}]') svc_status = str(stdout).strip().rstrip('\n') # Try to remove existing service try: if svc_status == SVC_STATUS.RUNNING: package.svc_manager.stop() package.svc_manager.remove() LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Remove' f' existing service: {svc_name}: OK') except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Remove existing' f' service: {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e # Setup a replacement service try: package.svc_manager.setup() ret_code, stdout, stderr = (package.svc_manager.status()) svc_status = str(stdout).strip().rstrip('\n') except svcm_exc.ServiceManagerCommandError as e: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup replacement' f' service: {svc_name}: {e}') raise cr_exc.SetupCommandError(err_msg) from e else: if svc_status != SVC_STATUS.STOPPED: err_msg = (f'Execute: {pkg_id.pkg_name}: Setup' f' replacement service: {svc_name}:' f' Invalid status: {svc_status}') raise cr_exc.SetupCommandError(err_msg) LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' {svc_name}: OK') else: LOG.debug(f'{msg_src}: Execute: {pkg_id.pkg_name}: Setup service:' f' NOP') def _deploy_dcos_conf(self): """Deploy aggregated DC/OS configuration object.""" # TODO: This should be made standalone and then reused in command # manager classes CmdSetup and CmdUpgrade to avoid code # duplication LOG.debug(f'{self.msg_src}: Execute: Deploy aggregated config: ...') template = self.config.dcos_conf.get('template') values = self.config.dcos_conf.get('values') rendered = template.render(values) config = 
yaml.safe_load(rendered) assert config.keys() == {"package"} # Write out the individual files for file_info in config["package"]: assert file_info.keys() <= {"path", "content", "permissions"} path = Path(file_info['path'].replace('\\', os.path.sep)) path.parent.mkdir(parents=True, exist_ok=True) path.write_text(file_info['content'] or '') # On Windows, we don't interpret permissions yet LOG.debug(f'{self.msg_src}: Execute: Deploy aggregated config: OK') @command_type(CLI_COMMAND.START) class CmdStart(Command): """Start command implementation.""" def __init__(self, **cmd_opts): """Constructor.""" self.msg_src = self.__class__.__name__ super(CmdStart, self).__init__(**cmd_opts) self.config = cmdconf.create(**self.cmd_opts) LOG.debug(f'{self.msg_src}: cmd_opts: {self.cmd_opts}') def verify_cmd_options(self): """Verify command options.""" pass def execute(self): """Execute command.""" # TODO: Implement DC/OS installation state detection here (alike how # it's done in CmdSetup.execute() or CmdSetup.execute()) to # allow attempts to start services only if # istate == ISTATE.INSTALLED: pkg_manifests = ( self.config.inst_storage.get_pkgactive(PackageManifest.load) ) packages_bulk = { m.pkg_id.pkg_name: Package(manifest=m) for m in pkg_manifests } for package in cr_utl.pkg_sort_by_deps(packages_bulk): pkg_id = package.manifest.pkg_id mheading = f'{self.msg_src}: Execute: {pkg_id.pkg_name}' # TODO: This part should be replaced with # Package.handle_svc_start() method if package.svc_manager: svc_name = package.svc_manager.svc_name LOG.debug(f'{mheading}: Start service: {svc_name}: ...') try: self.service_start(package.svc_manager) except (svcm_exc.ServiceError, svcm_exc.ServiceManagerError) as e: LOG.error(f'{mheading}: Start service:' f' {type(e).__name__}: {e}') else: LOG.debug(f'{mheading}: Start service: {svc_name}: OK') else: LOG.debug(f'{mheading}: Start service: NOP') @cm_utl.retry_on_exc((svcm_exc.ServiceManagerCommandError, svcm_exc.ServiceTransientError), max_attempts=3) def service_start(self, svc_manager): """Start a system service. 
:param svc_manager: WindowsServiceManager, service manager object """ # TODO: Functionality of this method should be moved to the # Package.handle_svc_start() method svc_name = svc_manager.svc_name # Discover initial service status try: ret_code, stdout, stderr = svc_manager.status() except svcm_exc.ServiceManagerCommandError as e: err_msg = f'Get initial service status: {svc_name}: {e}' raise type(e)(err_msg) from e # Subject to retry else: log_msg = (f'Get initial service status: {svc_name}:' f'stdout[{stdout}] stderr[{stderr}]') LOG.debug(log_msg) svc_status = str(stdout).strip().rstrip('\n') # Manage service appropriately to its status if svc_status == SVC_STATUS.STOPPED: # Start a service try: svc_manager.start() except svcm_exc.ServiceManagerCommandError as e: err_msg = f'Start service: {svc_name}: {e}' raise type(e)(err_msg) from e # Subject to retry # Verify that service is running try: ret_code, stdout, stderr = svc_manager.status() LOG.debug(f'Get final service status: {svc_name}:' f'stdout[{stdout}] stderr[{stderr}]') svc_status = str(stdout).strip().rstrip('\n') if svc_status == SVC_STATUS.START_PENDING: msg = f'Service is starting: {svc_name}' LOG.debug(msg) raise svcm_exc.ServiceTransientError(msg) # Subject to retry elif svc_status != SVC_STATUS.RUNNING: err_msg = (f'Start service: {svc_name}: Failed:' f' {svc_status}') raise svcm_exc.ServicePersistentError(err_msg) except svcm_exc.ServiceManagerCommandError as e: err_msg = f'Get final service status: {svc_name}: {e}' raise type(e)(err_msg) from e # Subject to retry elif svc_status == SVC_STATUS.START_PENDING: msg = f'Service is starting: {svc_name}: ...' LOG.debug(msg) raise svcm_exc.ServiceTransientError(msg) # Subject to retry elif svc_status == SVC_STATUS.RUNNING: LOG.debug(f'Service is already running: {svc_name}') else: err_msg = f'Invalid service status: {svc_name}: {svc_status}' raise svcm_exc.ServicePersistentError(err_msg)
42.374607
81
0.578387
4,647
40,383
4.799225
0.090381
0.019505
0.019729
0.022061
0.796386
0.781365
0.767241
0.743969
0.722133
0.709981
0
0.000221
0.326598
40,383
952
82
42.419118
0.819887
0.202115
0
0.709677
0
0
0.194868
0.007552
0
0
0
0.011555
0.006452
1
0.058065
false
0.008065
0.037097
0.003226
0.109677
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
7
2d4a74d80e7ca12edf9fa024d3e0f9a34e70cf37
1,084
py
Python
jp.atcoder/abc059/arc072_a/8228095.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-09T03:06:25.000Z
2022-02-09T03:06:25.000Z
jp.atcoder/abc059/arc072_a/8228095.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-05T22:53:18.000Z
2022-02-09T01:29:30.000Z
jp.atcoder/abc059/arc072_a/8228095.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
null
null
null
# Fewer WA verdicts than c_WA2.py
n = int(input())
a = [int(a) for a in input().split()]

times = 0
sum_prefix = a[0]
for i in range(1, n):
    if sum_prefix < 0:
        if sum_prefix + a[i] > 0:
            sum_prefix += a[i]
        else:
            times += 1 - (sum_prefix + a[i])
            sum_prefix = 1
    elif sum_prefix > 0:
        if sum_prefix + a[i] < 0:
            sum_prefix += a[i]
        else:
            times += abs(-1 - (sum_prefix + a[i]))
            sum_prefix = -1

times2 = 0
if a[0] > 0:
    times2 += abs(-1 - a[0])
    sum_prefix = -1
else:
    times2 += 1 - a[0]
    sum_prefix = 1
for i in range(1, n):
    if sum_prefix < 0:
        if sum_prefix + a[i] > 0:
            sum_prefix = sum_prefix + a[i]
        else:
            times2 += 1 - (sum_prefix + a[i])
            sum_prefix = 1
    elif sum_prefix > 0:
        if sum_prefix + a[i] < 0:
            sum_prefix = sum_prefix + a[i]
        else:
            times2 += abs(-1 - (sum_prefix + a[i]))
            sum_prefix = -1

print(min(times, times2))
24.088889
52
0.453875
161
1,084
2.89441
0.15528
0.482833
0.27897
0.283262
0.759657
0.759657
0.703863
0.703863
0.703863
0.596567
0
0.05625
0.409594
1,084
44
53
24.636364
0.671875
0.016605
0
0.657895
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.026316
0
0
0
null
1
1
1
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2d906dfbb5c1a5c19483b5b184524f45a569b4bc
78
py
Python
plotting/__init__.py
chrvt/Inflation-Deflation
e19dc603e711df32821f4380acefc88ff64645a4
[ "MIT" ]
null
null
null
plotting/__init__.py
chrvt/Inflation-Deflation
e19dc603e711df32821f4380acefc88ff64645a4
[ "MIT" ]
null
null
null
plotting/__init__.py
chrvt/Inflation-Deflation
e19dc603e711df32821f4380acefc88ff64645a4
[ "MIT" ]
null
null
null
from .plots import plt_latent_distribution
from .plots import plt_latent_fom
26
43
0.858974
12
78
5.25
0.583333
0.285714
0.47619
0.571429
0.761905
0
0
0
0
0
0
0
0.115385
78
2
44
39
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
930d34ce5dcbf8d25c1c5913cb5b24b5608ece0c
37,418
py
Python
tests/test_libfv.py
source-foundry/font-v
590c9f540cf73ffccc2a1907b186d16b296729e5
[ "MIT" ]
14
2017-09-15T23:29:03.000Z
2021-12-06T12:48:39.000Z
tests/test_libfv.py
source-foundry/font-v
590c9f540cf73ffccc2a1907b186d16b296729e5
[ "MIT" ]
135
2017-09-05T13:33:29.000Z
2022-03-28T08:03:02.000Z
tests/test_libfv.py
source-foundry/font-v
590c9f540cf73ffccc2a1907b186d16b296729e5
[ "MIT" ]
6
2018-10-23T14:49:17.000Z
2021-12-01T22:47:37.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import math import os import os.path import re import pytest from fontTools.ttLib import TTFont, TTLibError from fontv.libfv import FontVersion # TEST FONT FILE CREATION # fv = FontVersion("testfiles/Hack-Regular.ttf") # # fv.version_string_parts = ['Version 1.010'] # fv.write_version_string(fontpath="testfiles/Test-VersionOnly.ttf") # # fv.version_string_parts = ["Version 1.010", "DEV"] # fv.write_version_string(fontpath="testfiles/Test-VersionDEV.ttf") # # fv.version_string_parts = ["Version 1.010", "RELEASE"] # fv.write_version_string(fontpath="testfiles/Test-VersionREL.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]"] # fv.write_version_string(fontpath="testfiles/Test-VersionSha.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionShaMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]-dev"] # fv.write_version_string(fontpath="testfiles/Test-VersionShaDEV.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]-release"] # fv.write_version_string(fontpath="testfiles/Test-VersionShaREL.ttf") # # fv.version_string_parts = ["Version 1.010", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "metadata string", "another metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionMoreMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "DEV", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionDEVMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "RELEASE", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionRELMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]-dev", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionShaDEVMeta.ttf") # # fv.version_string_parts = ["Version 1.010", "[abcd123]-release", "metadata string"] # fv.write_version_string(fontpath="testfiles/Test-VersionShaRELMeta.ttf") # Test file version strings (ttf shown, otf with same paths include the same version strings) # Test-VersionDEV.ttf: # Version 1.010;DEV # Test-VersionDEVMeta.ttf: # Version 1.010;DEV;metadata string # Test-VersionMeta.ttf: # Version 1.010;metadata string # Test-VersionMoreMeta.ttf: # Version 1.010;metadata string;another metadata string # Test-VersionOnly.ttf: # Version 1.010 # Test-VersionREL.ttf: # Version 1.010;RELEASE # Test-VersionRELMeta.ttf: # Version 1.010;RELEASE;metadata string # Test-VersionSha.ttf: # Version 1.010;[abcd123] # Test-VersionShaMeta.ttf: # Version 1.010;[abcd123];metadata string # Test-VersionShaDEV.ttf: # Version 1.010;[abcd123]-dev # Test-VersionShaDEVMeta.ttf: # Version 1.010;[abcd123]-dev;metadata string # Test-VersionShaREL.ttf: # Version 1.010;[abcd123]-release # Test-VersionShaRELMeta.ttf: # Version 1.010;[abcd123]-release;metadata string all_testfiles_list = [ "tests/testfiles/Test-VersionDEV.ttf", "tests/testfiles/Test-VersionDEVMeta.ttf", "tests/testfiles/Test-VersionMeta.ttf", "tests/testfiles/Test-VersionMoreMeta.ttf", "tests/testfiles/Test-VersionOnly.ttf", "tests/testfiles/Test-VersionREL.ttf", "tests/testfiles/Test-VersionRELMeta.ttf", "tests/testfiles/Test-VersionSha.ttf", "tests/testfiles/Test-VersionShaMeta.ttf", "tests/testfiles/Test-VersionShaDEV.ttf", "tests/testfiles/Test-VersionShaDEVMeta.ttf", 
"tests/testfiles/Test-VersionShaREL.ttf", "tests/testfiles/Test-VersionShaRELMeta.ttf", "tests/testfiles/Test-VersionDEV.otf", "tests/testfiles/Test-VersionDEVMeta.otf", "tests/testfiles/Test-VersionMeta.otf", "tests/testfiles/Test-VersionMoreMeta.otf", "tests/testfiles/Test-VersionOnly.otf", "tests/testfiles/Test-VersionREL.otf", "tests/testfiles/Test-VersionRELMeta.otf", "tests/testfiles/Test-VersionSha.otf", "tests/testfiles/Test-VersionShaMeta.otf", "tests/testfiles/Test-VersionShaDEV.otf", "tests/testfiles/Test-VersionShaDEVMeta.otf", "tests/testfiles/Test-VersionShaREL.otf", "tests/testfiles/Test-VersionShaRELMeta.otf", ] meta_testfiles_list = [ "tests/testfiles/Test-VersionMeta.ttf", "tests/testfiles/Test-VersionMoreMeta.ttf", "tests/testfiles/Test-VersionMeta.otf", "tests/testfiles/Test-VersionMoreMeta.otf", ] dev_testfiles_list = [ "tests/testfiles/Test-VersionDEV.ttf", "tests/testfiles/Test-VersionDEVMeta.ttf", "tests/testfiles/Test-VersionShaDEV.ttf", "tests/testfiles/Test-VersionShaDEVMeta.ttf", "tests/testfiles/Test-VersionDEV.otf", "tests/testfiles/Test-VersionDEVMeta.otf", "tests/testfiles/Test-VersionShaDEV.otf", "tests/testfiles/Test-VersionShaDEVMeta.otf", ] rel_testfiles_list = [ "tests/testfiles/Test-VersionREL.ttf", "tests/testfiles/Test-VersionRELMeta.ttf", "tests/testfiles/Test-VersionShaREL.ttf", "tests/testfiles/Test-VersionShaRELMeta.ttf", "tests/testfiles/Test-VersionREL.otf", "tests/testfiles/Test-VersionRELMeta.otf", "tests/testfiles/Test-VersionShaREL.otf", "tests/testfiles/Test-VersionShaRELMeta.otf", ] state_testfiles_list = [ "tests/testfiles/Test-VersionSha.ttf", "tests/testfiles/Test-VersionShaMeta.ttf", "tests/testfiles/Test-VersionShaDEV.ttf", "tests/testfiles/Test-VersionShaREL.ttf", "tests/testfiles/Test-VersionShaDEVMeta.ttf", "tests/testfiles/Test-VersionShaRELMeta.ttf", "tests/testfiles/Test-VersionSha.otf", "tests/testfiles/Test-VersionShaMeta.otf", "tests/testfiles/Test-VersionShaDEV.otf", "tests/testfiles/Test-VersionShaREL.otf", "tests/testfiles/Test-VersionShaDEVMeta.otf", "tests/testfiles/Test-VersionShaRELMeta.otf", ] # pytest fixtures for parametrized testing of various groupings of test files @pytest.fixture(params=all_testfiles_list) def allfonts(request): return request.param @pytest.fixture(params=meta_testfiles_list) def metafonts(request): return request.param @pytest.fixture(params=dev_testfiles_list) def devfonts(request): return request.param @pytest.fixture(params=rel_testfiles_list) def relfonts(request): return request.param @pytest.fixture(params=state_testfiles_list) def statefonts(request): return request.param # utilities for testing def _test_hexadecimal_sha1_formatted_string_matches(needle): p = re.compile(r"""\[[(a-f|0-9)]{7,15}\]""") m = p.match(needle) if m is None: return False else: return True def _test_hexadecimal_sha1_string_matches(needle): p = re.compile("""[(a-f|0-9)]{7,15}""") m = p.match(needle) if m is None: return False else: return True def _get_mock_missing_nameid5_ttfont(filepath): ttf = TTFont(filepath) record_list = [] for record in ttf["name"].names: if record.nameID == 5: pass else: record_list.append(record) ttf["name"].names = record_list return ttf # TESTS # # # BEGIN FontVersion INSTANTIATION TESTS # # def test_libfv_missing_file_read_attempt(): with pytest.raises(IOError): fv = FontVersion("tests/testfiles/bogus.ttf") def test_libfv_nonfont_file_read_attempt(): with pytest.raises(TTLibError): fv = FontVersion("tests/testfiles/test.txt") def test_libfv_mocked_missing_name_tables_attempt(): with 
pytest.raises(IndexError): ttf = _get_mock_missing_nameid5_ttfont("tests/testfiles/Test-VersionOnly.ttf") fv = FontVersion(ttf) def test_libfv_fontversion_obj_instantiation_with_filepath_string(allfonts): fv = FontVersion(allfonts) def test_libfv_fontversion_obj_instantiation_with_ttfont_object(allfonts): ttf = TTFont(allfonts) fv1 = FontVersion(ttf) fv2 = FontVersion(allfonts) assert fv1.fontpath == fv2.fontpath assert fv1.version_string_parts == fv2.version_string_parts assert fv1.develop_string == fv2.develop_string assert fv1.release_string == fv2.release_string assert fv1.sha1_develop == fv2.sha1_develop assert fv1.sha1_release == fv2.sha1_release assert fv1.version == fv2.version assert fv1.metadata == fv2.metadata assert fv1.contains_status == fv2.contains_status assert fv1.contains_metadata == fv2.contains_metadata assert fv1.is_release == fv2.is_release assert fv1.is_development == fv2.is_development def test_libfv_version_string_property_set_on_instantiation(allfonts): fv = FontVersion(allfonts) assert fv.version == "Version 1.010" def test_libfv_version_string_property_set_on_instantiation_ttfont_object(allfonts): ttf = TTFont(allfonts) fv = FontVersion(ttf) assert fv.version == "Version 1.010" def test_libfv_head_fontrevision_property_set_on_instantiation(allfonts): fv = FontVersion(allfonts) assert math.isclose(fv.head_fontRevision, 1.010, abs_tol=0.00001) def test_libfv_head_fontrevision_property_set_on_instantiation_ttfont_object(allfonts): ttf = TTFont(allfonts) fv = FontVersion(ttf) assert math.isclose(fv.head_fontRevision, 1.010, abs_tol=0.00001) def test_libfv_fontversion_object_parameter_properties_defaults(allfonts): fv = FontVersion(allfonts) assert fv.develop_string == "DEV" assert fv.release_string == "RELEASE" assert fv.sha1_develop == "-dev" assert fv.sha1_release == "-release" def test_libfv_fontversion_object_parameter_properties_defaults_ttfont_object(allfonts): ttf = TTFont(allfonts) fv = FontVersion(ttf) assert fv.develop_string == "DEV" assert fv.release_string == "RELEASE" assert fv.sha1_develop == "-dev" assert fv.sha1_release == "-release" def test_libfv_fontversion_object_properties_truth_defaults(): fv1 = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") assert fv1.contains_metadata is False assert fv1.contains_status is False assert fv1.is_development is False assert fv1.is_release is False assert fv2.contains_metadata is False assert fv2.contains_status is False assert fv2.is_development is False assert fv2.is_release is False def test_libfv_fontversion_object_properties_truth_defaults_ttfont_object(): ttf1 = TTFont("tests/testfiles/Test-VersionOnly.ttf") fv1 = FontVersion(ttf1) ttf2 = TTFont("tests/testfiles/Test-VersionOnly.otf") fv2 = FontVersion(ttf2) assert fv1.contains_metadata is False assert fv1.contains_status is False assert fv1.is_development is False assert fv1.is_release is False assert fv2.contains_metadata is False assert fv2.contains_status is False assert fv2.is_development is False assert fv2.is_release is False def test_libfv_fontversion_object_properties_truth_defaults_with_metaonly(metafonts): fv = FontVersion(metafonts) assert fv.contains_metadata is True assert fv.contains_status is False assert fv.is_development is False assert fv.is_release is False def test_libfv_fontversion_object_properties_truth_defaults_with_metaonly_ttfont_object( metafonts, ): ttf = TTFont(metafonts) fv = FontVersion(ttf) assert fv.contains_metadata is True assert fv.contains_status is False 
assert fv.is_development is False assert fv.is_release is False def test_libfv_fontversion_object_properties_truth_development(devfonts): fv = FontVersion(devfonts) assert fv.contains_metadata is True assert fv.contains_status is True assert fv.is_development is True assert fv.is_release is False def test_libfv_fontversion_object_properties_truth_development_ttfont_object(devfonts): ttf = TTFont(devfonts) fv = FontVersion(ttf) assert fv.contains_metadata is True assert fv.contains_status is True assert fv.is_development is True assert fv.is_release is False def test_libfv_fontversion_object_properties_truth_release(relfonts): fv = FontVersion(relfonts) assert fv.contains_metadata is True assert fv.contains_status is True assert fv.is_development is False assert fv.is_release is True def test_libfv_fontversion_object_properties_truth_release_ttfont_object(relfonts): ttf = TTFont(relfonts) fv = FontVersion(ttf) assert fv.contains_metadata is True assert fv.contains_status is True assert fv.is_development is False assert fv.is_release is True def test_libfv_fontversion_object_properties_truth_sha(statefonts): fv = FontVersion(statefonts) assert fv.contains_state is True assert len(fv.state) > 0 def test_libfv_fontversion_object_properties_truth_sha_ttfont_object(statefonts): ttf = TTFont(statefonts) fv = FontVersion(ttf) assert fv.contains_state is True assert len(fv.state) > 0 def test_libfv_fontversion_object_properties_truth_state_versionstring_only(): fv1 = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") assert fv1.contains_state is False assert fv2.contains_state is False assert len(fv1.state) == 0 assert len(fv2.state) == 0 def test_libfv_fontversion_object_properties_truth_state_meta_without_state(): fv1 = FontVersion("tests/testfiles/Test-VersionMeta.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionMeta.otf") fv3 = FontVersion("tests/testfiles/Test-VersionMoreMeta.ttf") fv4 = FontVersion("tests/testfiles/Test-VersionMoreMeta.otf") assert fv1.contains_state is False assert fv2.contains_state is False assert fv3.contains_state is False assert fv4.contains_state is False assert len(fv1.state) == 0 assert len(fv2.state) == 0 assert len(fv3.state) == 0 assert len(fv4.state) == 0 def test_libfv_fontversion_object_versionparts_meta_lists_versionstring_only(): fv1 = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") assert len(fv1.version_string_parts) == 1 assert len(fv1.metadata) == 0 assert len(fv2.version_string_parts) == 1 assert len(fv2.metadata) == 0 def test_libfv_fontversion_object_versionparts_meta_lists_versionstring_only_ttfont_object(): ttf1 = TTFont("tests/testfiles/Test-VersionOnly.ttf") fv1 = FontVersion(ttf1) ttf2 = TTFont("tests/testfiles/Test-VersionOnly.otf") fv2 = FontVersion(ttf2) assert len(fv1.version_string_parts) == 1 assert len(fv1.metadata) == 0 assert len(fv2.version_string_parts) == 1 assert len(fv2.metadata) == 0 def test_libfv_fontversion_object_versionparts_meta_lists_version_with_onemeta(): fv1 = FontVersion("tests/testfiles/Test-VersionMeta.ttf") assert len(fv1.version_string_parts) == 2 assert fv1.version_string_parts[0] == "Version 1.010" assert fv1.version_string_parts[1] == "metadata string" assert len(fv1.metadata) == 1 assert fv1.metadata[0] == "metadata string" fv2 = FontVersion("tests/testfiles/Test-VersionMeta.otf") assert len(fv2.version_string_parts) == 2 assert fv2.version_string_parts[0] == "Version 1.010" assert 
fv2.version_string_parts[1] == "metadata string" assert len(fv2.metadata) == 1 assert fv2.metadata[0] == "metadata string" def test_libfv_fontversion_object_versionparts_meta_lists_version_with_onemeta_ttfont_object(): ttf1 = TTFont("tests/testfiles/Test-VersionMeta.ttf") fv1 = FontVersion(ttf1) assert len(fv1.version_string_parts) == 2 assert fv1.version_string_parts[0] == "Version 1.010" assert fv1.version_string_parts[1] == "metadata string" assert len(fv1.metadata) == 1 assert fv1.metadata[0] == "metadata string" ttf2 = TTFont("tests/testfiles/Test-VersionMeta.otf") fv2 = FontVersion(ttf2) assert len(fv2.version_string_parts) == 2 assert fv2.version_string_parts[0] == "Version 1.010" assert fv2.version_string_parts[1] == "metadata string" assert len(fv2.metadata) == 1 assert fv2.metadata[0] == "metadata string" def test_libfv_fontversion_object_versionparts_meta_lists_version_with_twometa(): fv = FontVersion("tests/testfiles/Test-VersionMoreMeta.ttf") assert len(fv.version_string_parts) == 3 assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "metadata string" assert fv.version_string_parts[2] == "another metadata string" assert len(fv.metadata) == 2 assert fv.metadata[0] == "metadata string" assert fv.metadata[1] == "another metadata string" fv2 = FontVersion("tests/testfiles/Test-VersionMoreMeta.otf") assert len(fv2.version_string_parts) == 3 assert fv2.version_string_parts[0] == "Version 1.010" assert fv2.version_string_parts[1] == "metadata string" assert fv2.version_string_parts[2] == "another metadata string" assert len(fv2.metadata) == 2 assert fv2.metadata[0] == "metadata string" assert fv2.metadata[1] == "another metadata string" def test_libfv_fontversion_object_versionparts_meta_lists_version_with_twometa_ttfont_object(): ttf1 = TTFont("tests/testfiles/Test-VersionMoreMeta.ttf") fv1 = FontVersion(ttf1) assert len(fv1.version_string_parts) == 3 assert fv1.version_string_parts[0] == "Version 1.010" assert fv1.version_string_parts[1] == "metadata string" assert fv1.version_string_parts[2] == "another metadata string" assert len(fv1.metadata) == 2 assert fv1.metadata[0] == "metadata string" assert fv1.metadata[1] == "another metadata string" ttf2 = TTFont("tests/testfiles/Test-VersionMoreMeta.otf") fv2 = FontVersion(ttf2) assert len(fv2.version_string_parts) == 3 assert fv2.version_string_parts[0] == "Version 1.010" assert fv2.version_string_parts[1] == "metadata string" assert fv2.version_string_parts[2] == "another metadata string" assert len(fv2.metadata) == 2 assert fv2.metadata[0] == "metadata string" assert fv2.metadata[1] == "another metadata string" # # # END FontVersion INSTANTIATION TESTS # # # # # BEGIN FontVersion METHOD TESTS # # def test_libfv_fontversion_object_str_method(allfonts): fv = FontVersion(allfonts) test_string = fv.__str__() assert test_string.startswith("<fontv.libfv.FontVersion> ") is True assert fv.get_name_id5_version_string() in test_string assert fv.fontpath in test_string def test_libfv_fontversion_object_equality(allfonts): fv1 = FontVersion(allfonts) fv2 = FontVersion(allfonts) fv3 = FontVersion(allfonts) fv3.version_string_parts[0] = "Version 12.000" assert fv1 == fv2 assert (fv1 == fv3) is False assert (fv1 == "test string") is False assert (fv1 == fv1.version_string_parts) is False def test_libfv_fontversion_object_inequality(allfonts): fv1 = FontVersion(allfonts) fv2 = FontVersion(allfonts) fv3 = FontVersion(allfonts) fv3.version_string_parts[0] = "Version 12.000" assert (fv1 != fv2) is False assert fv1 
!= fv3 assert fv1 != "test string" assert fv1 != fv1.version_string_parts def test_libfv_clear_metadata_method(allfonts): fv = FontVersion(allfonts) fv.clear_metadata() assert len(fv.version_string_parts) == 1 assert fv.version_string_parts[0] == "Version 1.010" def test_libfv_get_head_fontrevision_method(allfonts): fv = FontVersion(allfonts) assert math.isclose( fv.get_head_fontrevision_version_number(), 1.010, abs_tol=0.00001 ) def test_libfv_get_metadata_method(): fv1 = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionMeta.ttf") fv3 = FontVersion("tests/testfiles/Test-VersionMoreMeta.ttf") assert fv1.get_metadata_list() == [] assert fv2.get_metadata_list() == ["metadata string"] assert fv3.get_metadata_list() == ["metadata string", "another metadata string"] fv4 = FontVersion("tests/testfiles/Test-VersionOnly.otf") fv5 = FontVersion("tests/testfiles/Test-VersionMeta.otf") fv6 = FontVersion("tests/testfiles/Test-VersionMoreMeta.otf") assert fv4.get_metadata_list() == [] assert fv5.get_metadata_list() == ["metadata string"] assert fv6.get_metadata_list() == ["metadata string", "another metadata string"] def test_libfv_get_status_method_onlyversion(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") status_string = fv.get_state_status_substring() assert status_string == "" def test_libfv_get_status_method_development(devfonts): fv = FontVersion(devfonts) status_string = fv.get_state_status_substring() assert status_string == fv.version_string_parts[1] def test_libfv_get_status_method_release(relfonts): fv = FontVersion(relfonts) status_string = fv.get_state_status_substring() assert status_string == fv.version_string_parts[1] def test_libfv_get_status_method_nostatus(metafonts): fv = FontVersion(metafonts) status_string = fv.get_state_status_substring() assert status_string == "" def test_libfv_is_state_substring_return_match_valid(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") is_state_substring, state_substring = fv._is_state_substring_return_state_match( "[abcd123]" ) assert is_state_substring is True assert state_substring == "abcd123" is_state_substring, state_substring = fv._is_state_substring_return_state_match( "[abcd123]-dev" ) assert is_state_substring is True assert state_substring == "abcd123" is_state_substring, state_substring = fv._is_state_substring_return_state_match( "[abcd123]-release" ) assert is_state_substring is True assert state_substring == "abcd123" def test_libfv_is_state_substring_return_match_invalid(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") is_state_substring, state_substring = fv._is_state_substring_return_state_match( "abcd123" ) assert is_state_substring is False assert state_substring == "" is_state_substring, state_substring = fv._is_state_substring_return_state_match( "{abcd123}" ) assert is_state_substring is False assert state_substring == "" is_state_substring, state_substring = fv._is_state_substring_return_state_match( "[&%$#@!]" ) assert is_state_substring is False assert state_substring == "" def test_libfv_get_version_number_string(allfonts): fv = FontVersion(allfonts) assert fv.get_version_number_string() == "1.010" def test_libfv_get_version_number_string_bad_version_number(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") with pytest.raises(ValueError): # mock a bad version number substring fv.set_version_number("x.xxx") assert fv.get_version_number_string() == "" def test_libfv_get_version_number_tuple(): fv = 
FontVersion("tests/testfiles/Test-VersionOnly.ttf") assert fv.get_version_number_tuple() == (1, 0, 1, 0) # mock new version numbers in memory and confirm that they are correct in tuples fv.version = "Version 1.1" assert fv.get_version_number_tuple() == (1, 1) fv.version = "Version 1.01" assert fv.get_version_number_tuple() == (1, 0, 1) fv.version = "Version 1.001" assert fv.get_version_number_tuple() == (1, 0, 0, 1) fv.version = "Version 10.1" assert fv.get_version_number_tuple() == (10, 1) fv.version = "Version 10.01" assert fv.get_version_number_tuple() == (10, 0, 1) fv.version = "Version 10.001" assert fv.get_version_number_tuple() == (10, 0, 0, 1) fv.version = "Version 100.001" assert fv.get_version_number_tuple() == (100, 0, 0, 1) def test_libfv_get_version_number_tuple_bad_version_number(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") assert fv.get_version_number_tuple() == (1, 0, 1, 0) with pytest.raises(ValueError): # mock a bad version number substring fv.set_version_number("x.xxx") assert fv.get_version_number_tuple() is None def test_libfv_get_name_id5_version_string_method(): fv1 = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv2 = FontVersion("tests/testfiles/Test-VersionMeta.ttf") fv3 = FontVersion("tests/testfiles/Test-VersionMoreMeta.ttf") assert fv1.get_name_id5_version_string() == "Version 1.010" assert fv2.get_name_id5_version_string() == "Version 1.010;metadata string" assert ( fv3.get_name_id5_version_string() == "Version 1.010;metadata string;another metadata string" ) fv4 = FontVersion("tests/testfiles/Test-VersionOnly.otf") fv5 = FontVersion("tests/testfiles/Test-VersionMeta.otf") fv6 = FontVersion("tests/testfiles/Test-VersionMoreMeta.otf") assert fv4.get_name_id5_version_string() == "Version 1.010" assert fv5.get_name_id5_version_string() == "Version 1.010;metadata string" assert ( fv6.get_name_id5_version_string() == "Version 1.010;metadata string;another metadata string" ) def test_libfv_set_development_method_on_versiononly(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") assert len(fv.version_string_parts) == 1 fv.set_development_status() assert len(fv.version_string_parts) == 2 assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "DEV" assert fv.is_development is True assert fv.is_release is False assert fv.contains_status is True assert fv.contains_metadata is True fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") assert len(fv2.version_string_parts) == 1 fv2.set_development_status() assert len(fv2.version_string_parts) == 2 assert fv2.version_string_parts[0] == "Version 1.010" assert fv2.version_string_parts[1] == "DEV" assert fv2.is_development is True assert fv2.is_release is False assert fv2.contains_status is True assert fv2.contains_metadata is True def test_libfv_set_development_method_on_release(relfonts): fv = FontVersion(relfonts) prelength = len(fv.version_string_parts) fv.set_development_status() postlength = len(fv.version_string_parts) assert prelength == postlength assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "DEV" assert fv.is_development is True assert fv.is_release is False assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_development_method_on_development(devfonts): fv = FontVersion(devfonts) prelength = len(fv.version_string_parts) fv.set_development_status() postlength = len(fv.version_string_parts) assert prelength == postlength assert fv.version_string_parts[0] == "Version 1.010" 
assert fv.version_string_parts[1] == "DEV" assert fv.is_development is True assert fv.is_release is False assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_development_method_on_nostatus(metafonts): fv = FontVersion(metafonts) prelength = len(fv.version_string_parts) fv.set_development_status() postlength = len(fv.version_string_parts) assert prelength == ( postlength - 1 ) # should add an additional substring to the version string here assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "DEV" assert fv.is_development is True assert fv.is_release is False assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_release_method_on_versiononly(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") assert len(fv.version_string_parts) == 1 fv.set_release_status() assert len(fv.version_string_parts) == 2 assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "RELEASE" assert fv.is_development is False assert fv.is_release is True assert fv.contains_status is True assert fv.contains_metadata is True fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") assert len(fv2.version_string_parts) == 1 fv2.set_release_status() assert len(fv2.version_string_parts) == 2 assert fv2.version_string_parts[0] == "Version 1.010" assert fv2.version_string_parts[1] == "RELEASE" assert fv2.is_development is False assert fv2.is_release is True assert fv2.contains_status is True assert fv2.contains_metadata is True def test_libfv_set_release_method_on_release(relfonts): fv = FontVersion(relfonts) prelength = len(fv.version_string_parts) fv.set_release_status() postlength = len(fv.version_string_parts) assert prelength == postlength assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "RELEASE" assert fv.is_development is False assert fv.is_release is True assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_release_method_on_development(devfonts): fv = FontVersion(devfonts) prelength = len(fv.version_string_parts) fv.set_release_status() postlength = len(fv.version_string_parts) assert prelength == postlength assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "RELEASE" assert fv.is_development is False assert fv.is_release is True assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_release_method_on_nostatus(metafonts): fv = FontVersion(metafonts) prelength = len(fv.version_string_parts) fv.set_release_status() postlength = len(fv.version_string_parts) assert prelength == ( postlength - 1 ) # should add an additional substring to the version string here assert fv.version_string_parts[0] == "Version 1.010" assert fv.version_string_parts[1] == "RELEASE" assert fv.is_development is False assert fv.is_release is True assert fv.contains_status is True assert fv.contains_metadata is True def test_libfv_set_gitsha1_bad_parameters_raises_valueerror(allfonts): with pytest.raises(ValueError): fv = FontVersion(allfonts) fv.set_state_git_commit_sha1(development=True, release=True) def test_libfv_set_default_gitsha1_method(allfonts): fv = FontVersion(allfonts) fv.set_state_git_commit_sha1() sha_needle = fv.version_string_parts[1] assert ( _test_hexadecimal_sha1_formatted_string_matches(sha_needle) is True ) # confirm that set with state label assert ( _test_hexadecimal_sha1_string_matches(fv.state) is True ) # confirm that state 
property is properly set assert ("-dev" in sha_needle) is False assert ("-release" in sha_needle) is False assert ("DEV" in sha_needle) is False assert ("RELEASE" in sha_needle) is False def test_libfv_set_development_gitsha1_method(allfonts): fv = FontVersion(allfonts) fv.set_state_git_commit_sha1(development=True) sha_needle = fv.version_string_parts[1] assert ( _test_hexadecimal_sha1_formatted_string_matches(sha_needle) is True ) # confirm that set with state label assert ( _test_hexadecimal_sha1_string_matches(fv.state) is True ) # confirm that state property is properly set assert ("-dev" in sha_needle) is True assert ("-release" in sha_needle) is False assert ("DEV" in sha_needle) is False assert ("RELEASE" in sha_needle) is False def test_libfv_set_release_gitsha1_method(allfonts): fv = FontVersion(allfonts) fv.set_state_git_commit_sha1(release=True) sha_needle = fv.version_string_parts[1] assert ( _test_hexadecimal_sha1_formatted_string_matches(sha_needle) is True ) # confirm that set with state label assert ( _test_hexadecimal_sha1_string_matches(fv.state) is True ) # confirm that state property is properly set assert ("-dev" in sha_needle) is False assert ("-release" in sha_needle) is True assert ("DEV" in sha_needle) is False assert ("RELEASE" in sha_needle) is False def test_libfv_set_gitsha1_both_dev_release_error(capsys): fv = FontVersion("tests/testfiles/Test-VersionMeta.ttf") with pytest.raises(ValueError) as pytest_wrapped_e: fv.set_state_git_commit_sha1(release=True, development=True) out, err = capsys.readouterr() assert pytest_wrapped_e.type == ValueError def test_libfv_set_version_number(allfonts): fv = FontVersion(allfonts) prelength = len(fv.version_string_parts) fv.set_version_number("2.000") postlength = len(fv.version_string_parts) assert prelength == postlength assert fv.version_string_parts[0] == "Version 2.000" assert fv.version == "Version 2.000" assert fv.head_fontRevision == 2.000 def test_libfv_set_version_number_invalid_number(allfonts): fv = FontVersion(allfonts) with pytest.raises(ValueError): # mock a bad version number substring fv.set_version_number("x.xxx") response = fv.get_version_number_string() assert len(response) == 0 def test_libfv_set_version_string_one_substring(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv.set_version_string("Version 2.000") assert len(fv.version_string_parts) == 1 assert fv.version_string_parts[0] == "Version 2.000" assert fv.version == "Version 2.000" assert fv.head_fontRevision == 2.000 fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") fv2.set_version_string("Version 2.000") assert len(fv2.version_string_parts) == 1 assert fv2.version_string_parts[0] == "Version 2.000" assert fv2.version == "Version 2.000" assert fv2.head_fontRevision == 2.000 def test_libfv_set_version_string_two_substrings(): fv = FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv.set_version_string("Version 2.000;DEV") assert len(fv.version_string_parts) == 2 assert fv.version_string_parts[0] == "Version 2.000" assert fv.version_string_parts[1] == "DEV" assert fv.version == "Version 2.000" assert fv.head_fontRevision == 2.000 fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") fv2.set_version_string("Version 2.000;DEV") assert len(fv2.version_string_parts) == 2 assert fv2.version_string_parts[0] == "Version 2.000" assert fv2.version_string_parts[1] == "DEV" assert fv.version == "Version 2.000" assert fv.head_fontRevision == 2.000 def test_libfv_set_version_string_three_substrings(): fv = 
FontVersion("tests/testfiles/Test-VersionOnly.ttf") fv.set_version_string("Version 2.000;DEV;other stuff") assert len(fv.version_string_parts) == 3 assert fv.version_string_parts[0] == "Version 2.000" assert fv.version_string_parts[1] == "DEV" assert fv.version_string_parts[2] == "other stuff" assert fv.version == "Version 2.000" assert fv.head_fontRevision == 2.000 fv2 = FontVersion("tests/testfiles/Test-VersionOnly.otf") fv2.set_version_string("Version 2.000;DEV;other stuff") assert len(fv2.version_string_parts) == 3 assert fv2.version_string_parts[0] == "Version 2.000" assert fv2.version_string_parts[1] == "DEV" assert fv2.version_string_parts[2] == "other stuff" assert fv2.version == "Version 2.000" assert fv2.head_fontRevision == 2.000 def test_libfv_write_version_string_method(allfonts): temp_out_file_path = os.path.join( "tests", "testfiles", "Test-Temp.ttf" ) # temp file write path fv = FontVersion(allfonts) fv.set_version_number("2.000") fv.write_version_string(fontpath=temp_out_file_path) fv2 = FontVersion(temp_out_file_path) assert fv2.version_string_parts[0] == "Version 2.000" assert fv2.version == "Version 2.000" assert fv2.head_fontRevision == 2.000 # modify again to test write to same temp file path without use of the fontpath parameter in # order to test the block of code where that is handled fv2.set_version_number("3.000") fv2.write_version_string() fv3 = FontVersion(temp_out_file_path) assert fv3.version_string_parts[0] == "Version 3.000" assert fv3.version == "Version 3.000" assert fv3.head_fontRevision == 3.000 os.remove(temp_out_file_path) def test_libfv_write_version_string_method_ttfont_object(allfonts): temp_out_file_path = os.path.join( "tests", "testfiles", "Test-Temp.ttf" ) # temp file write path ttf = TTFont(allfonts) fv = FontVersion(ttf) fv.set_version_number("2.000") fv.write_version_string(fontpath=temp_out_file_path) fv2 = FontVersion(temp_out_file_path) assert fv2.version_string_parts[0] == "Version 2.000" assert fv2.version == "Version 2.000" assert fv2.head_fontRevision == 2.000 # modify again to test write to same temp file path without use of the fontpath parameter in # order to test the block of code where that is handled fv2.set_version_number("3.000") fv2.write_version_string() fv3 = FontVersion(temp_out_file_path) assert fv3.version_string_parts[0] == "Version 3.000" assert fv3.version == "Version 3.000" assert fv3.head_fontRevision == 3.000 os.remove(temp_out_file_path)
34.710575
96
0.737693
5,032
37,418
5.241256
0.051073
0.080344
0.083946
0.051566
0.891598
0.849208
0.809661
0.753811
0.724539
0.683173
0
0.030955
0.159095
37,418
1,077
97
34.742804
0.807246
0.101582
0
0.670185
0
0
0.184148
0.126257
0
0
0
0
0.435356
1
0.097625
false
0.001319
0.010554
0.006596
0.121372
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
7
9354ca7eb2b7d86783ac0f789f7e453d5ae37a5f
7,135
py
Python
Calculate_power_absorbed_cylinders_0_2.py
mkraft89/To_eels_app
9e2fc6d501f51130b12164997f40cb7e0ec5a654
[ "Unlicense" ]
null
null
null
Calculate_power_absorbed_cylinders_0_2.py
mkraft89/To_eels_app
9e2fc6d501f51130b12164997f40cb7e0ec5a654
[ "Unlicense" ]
null
null
null
Calculate_power_absorbed_cylinders_0_2.py
mkraft89/To_eels_app
9e2fc6d501f51130b12164997f40cb7e0ec5a654
[ "Unlicense" ]
null
null
null
import numpy as np
from math import factorial

gamma = 0.32
n = np.arange(0, 30, 1)
ep0 = 8.85e-12
epD = 1.0
c = 3e8
Conv = 1.602e-19/6.626e-34*2*np.pi  # Conversion from eV to SI units


def Pow_abs_vert(R0, R1, R2, g2, x_e, c_e, omega):
    """Calculate the power absorbed for two nearly touching cylinders
    if an electron passes them along their vertical axis.

    Output: Resistive losses as a function of omega

    Variables: R0 = inversion point
               R1 = inner radius of dielectric shell
               R2 = outer radius of dielectric shell
               g2 = scaling factor g^2
               x_e = electron's position
               c_e = electron velocity
               omega = angular frequency in eV
    """
    epM = 1 - 64/(omega*(omega + 1j*gamma))
    epD = 1.
    omega = omega*Conv
    beta = 2 * epD / (epM + epD)
    alpha = (epM - epD) / (epM + epD)
    an_p = np.zeros(np.size(n), dtype='complex128')
    an_n = np.zeros(np.size(n), dtype='complex128')
    # Lambda = 4*pi*eps0 in expression for source coefficients.
    # Calculate lambda according to formula in Yu's document
    # on EELS in nearly touching cylinders.
    for k_n in range(1, np.size(n)):
        k_s = n[1:k_n+1]
        sum_k_n = 0
        sum_k_p = 0
        for m in k_s:
            sum_k_p += factorial(k_n-1) * (1j*omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
            sum_k_n += factorial(k_n-1) * (-1j*omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
        an_p[k_n] = (-1j)**k_n * np.exp(omega/c_e*(1j*g2/R0 - x_e)) \
            / omega * sum_k_p
        an_n[k_n] = (1j)**k_n * np.exp(-omega/c_e*x_e) \
            / omega * sum_k_n
    # Calculate expansion coefficients as in Yu's document
    fin_p = - beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * (R2/R1)**(2*n) * an_p
    fin_n = beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * alpha * (R0/R1)**(2*n) * an_n
    fon_p = beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * alpha * (R2/R0)**(2*n) * an_p
    fon_n = - beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * (R2/R1)**(2*n) * an_n
    # Calculate the power absorption
    Term1 = sum(n * (R1/R0)**(2*n) * (abs(fin_n)**2 + abs(fin_p)**2))
    Term2 = sum(n * (R0/R2)**(2*n) * (abs(fon_n)**2 + abs(fon_p)**2))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1 + Term2)


def Pow_abs_hori(R0, R1, R2, g2, y_e, c_e, omega):
    """Calculate the power absorbed for two nearly touching cylinders
    if an electron passes them horizontally, outside the gap and not
    through the cylinders.

    Output: Resistive losses as a function of omega

    Variables: R0 = inversion point
               R1 = inner radius of dielectric shell
               R2 = outer radius of dielectric shell
               g2 = scaling factor g^2
               y_e = electron's vertical position
               c_e = electron velocity
               omega = angular frequency in eV
    """
    epM = 1 - 64/(omega*(omega + 1j*gamma))
    epD = 1.
    omega = omega*Conv
    beta = 2 * epD / (epM + epD)
    alpha = (epM - epD) / (epM + epD)
    an_p = np.zeros(np.size(n), dtype='complex128')
    an_n = np.zeros(np.size(n), dtype='complex128')
    # Lambda = 4*pi*eps0 in expression for source coefficients.
    # Calculate lambda according to formula in Yu's document
    # on EELS in nearly touching cylinders.
    for k_n in range(1, np.size(n)):
        k_s = n[1:k_n+1]
        sum_k_n = 0
        sum_k_p = 0
        for m in k_s:
            sum_k_p += factorial(k_n-1) * (-omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
            sum_k_n += factorial(k_n-1) * (+omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
        an_n[k_n] = (1j)**k_n * np.exp(omega/c_e*(g2/R0 - y_e)) \
            / omega * sum_k_n
        an_p[k_n] = (-1j)**k_n * np.exp(-omega/c_e*y_e) \
            / omega * sum_k_p
    # Calculate expansion coefficients as in Yu's document
    fin_n = - beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * (R2/R1)**(2*n) * an_n
    fin_p = beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * alpha * (R0/R1)**(2*n) * an_p
    fon_n = beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * alpha * (R2/R0)**(2*n) * an_n
    fon_p = - beta * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * (R2/R1)**(2*n) * an_p
    # Calculate the power absorption
    Term1 = sum(n * (R1/R0)**(2*n) * (abs(fin_n)**2 + abs(fin_p)**2))
    Term2 = sum(n * (R0/R2)**(2*n) * (abs(fon_n)**2 + abs(fon_p)**2))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1 + Term2)


def Pow_abs_gap(R0, R1, R2, g2, y_e, c_e, omega):
    """Calculate the power absorbed for two nearly touching cylinders
    if an electron passes them horizontally, exactly through the gap
    between the cylinders.

    Output: Resistive losses as a function of omega

    Variables: R0 = inversion point
               R1 = inner radius of dielectric shell
               R2 = outer radius of dielectric shell
               g2 = scaling factor g^2
               y_e = electron's vertical position
               c_e = electron velocity
               omega = angular frequency in eV
    """
    epM = 1 - 64/(omega*(omega + 1j*gamma))
    epD = 1.
    omega = omega*Conv
    beta = 2 * epD / (epM + epD)
    alpha = (epM - epD) / (epM + epD)
    aS_p = np.zeros(np.size(n), dtype='complex128')
    aS_n = np.zeros(np.size(n), dtype='complex128')
    # Lambda = 4*pi*eps0 in expression for source coefficients.
    # Calculate lambda according to formula in Yu's document
    # on EELS in nearly touching cylinders.
    for k_n in range(1, np.size(n)):
        k_s = n[1:k_n+1]
        sum_k_n = 0
        sum_k_p = 0
        for m in k_s:
            sum_k_p += factorial(k_n-1) * (-omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
            sum_k_n += factorial(k_n-1) * (-omega*g2/c_e/R0)**m \
                / (factorial(m) * factorial(m-1) * factorial(k_n - m))
        aS_p[k_n] = (-1j)**k_n * np.exp(omega/c_e*(-g2/R0 + y_e)) \
            / omega * sum_k_n
        aS_n[k_n] = (-1j)**k_n * np.exp(-omega/c_e*y_e) \
            / omega * sum_k_p
    # Calculate expansion coefficients as in Yu's document
    bn_p = alpha * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * ((R0/R1)**(2*n)*aS_n - alpha*aS_p)
    cn_p = alpha * 1./(alpha**2 - (R2/R1)**(2*n)) \
        * ((R2/R0)**(2*n)*aS_p - alpha*aS_n)
    fin_p = epD / epM * \
        (bn_p + aS_p - cn_p*(R0/R1)**(2*n))
    fon_p = epD / epM * \
        (cn_p + aS_n - bn_p*(R2/R0)**(2*n))
    # Calculate the power absorption
    Term1 = sum(n * (R1/R0)**(2*n) * abs(fin_p)**2)
    Term2 = sum(n * (R0/R2)**(2*n) * abs(fon_p)**2)
    return np.pi * ep0 * omega * np.imag(epM) * (Term1 + Term2)


if __name__ == "__main__":
    print('Error: Supposed to be called as a function, not main module.')
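# A minimal usage sketch (illustrative only; the parameter values below are
# assumptions, not part of the original script):
#
#     omegas = np.linspace(0.5, 8.0, 300)   # energies in eV
#     losses = [Pow_abs_gap(R0, R1, R2, g2, y_e, c_e, w) for w in omegas]
#
# where R0, R1, R2 set the geometry, g2 is the scaling factor g^2, and
# y_e, c_e describe the electron trajectory, as documented above.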
31.995516
72
0.53609
1,164
7,135
3.141753
0.122852
0.021329
0.019688
0.02297
0.889527
0.882964
0.882964
0.882964
0.874214
0.851518
0
0.056786
0.303994
7,135
222
73
32.13964
0.679621
0.10007
0
0.580357
0
0
0.02623
0
0
0
0
0
0
0
null
null
0
0.017857
null
null
0.008929
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
fa8e415770ceff55cae51b87497939526f0c2ad7
65
py
Python
pysymspell/__init__.py
Yididya/amharic_spell_corrector
5e70fb19af3acb004d985a1820e6f9d4f4d569fa
[ "MIT" ]
17
2018-07-10T18:16:42.000Z
2022-03-27T18:20:57.000Z
pysymspell/__init__.py
Yididya/amharic_spell_corrector
5e70fb19af3acb004d985a1820e6f9d4f4d569fa
[ "MIT" ]
2
2021-03-20T09:05:38.000Z
2022-02-11T20:24:29.000Z
pysymspell/__init__.py
Yididya/amharic_spell_corrector
5e70fb19af3acb004d985a1820e6f9d4f4d569fa
[ "MIT" ]
8
2018-07-10T18:28:37.000Z
2022-02-03T16:50:42.000Z
from .symspell import EditDistance
from .symspell import SymSpell
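# The package root re-exports the two core classes, so downstream code can
# use, e.g.:
#
#     from pysymspell import SymSpell, EditDistance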
32.5
34
0.861538
8
65
7
0.5
0.428571
0.642857
0
0
0
0
0
0
0
0
0
0.107692
65
2
35
32.5
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
fabcadba192a707b5b428810bbb87d2b5fc71d06
47
py
Python
exts/maticodes.viewport.reticle/maticodes/viewport/reticle/__init__.py
matiascodesal/omni-camera-reticle
2a48fddcba07cb5cdfcf959ad977983bd36e2f7a
[ "Apache-2.0" ]
null
null
null
exts/maticodes.viewport.reticle/maticodes/viewport/reticle/__init__.py
matiascodesal/omni-camera-reticle
2a48fddcba07cb5cdfcf959ad977983bd36e2f7a
[ "Apache-2.0" ]
null
null
null
exts/maticodes.viewport.reticle/maticodes/viewport/reticle/__init__.py
matiascodesal/omni-camera-reticle
2a48fddcba07cb5cdfcf959ad977983bd36e2f7a
[ "Apache-2.0" ]
null
null
null
from .extension import CameraReticleExtension
23.5
46
0.87234
4
47
10.25
1
0
0
0
0
0
0
0
0
0
0
0
0.106383
47
1
47
47
0.97619
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
87a0475a9ddab5a22903e7d1309a5957a8d53f10
274
py
Python
pytradfri/__init__.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
pytradfri/__init__.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
pytradfri/__init__.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
"""Implement an API wrapper around Ikea Tradfri.""" from .error import PytradfriError, RequestError, ClientError, ServerError, RequestTimeout from .gateway import Gateway __all__ = ["Gateway", "PytradfriError", "RequestError", "ClientError", "ServerError", "RequestTimeout"]
68.5
103
0.784672
26
274
8.115385
0.653846
0.246446
0.350711
0.454976
0.587678
0
0
0
0
0
0
0
0.094891
274
4
103
68.5
0.850806
0.164234
0
0
0
0
0.308036
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
87c6e02f60cb7cb80968368c017700cbd9dc7f8d
38,846
py
Python
config/rules.py
zhouhongf/spider_news
6ea4a80d050bf3ab70724f45312c5a2580e714a6
[ "MIT" ]
2
2021-11-27T06:40:44.000Z
2022-02-23T11:19:11.000Z
config/rules.py
zhouhongf/spider_news
6ea4a80d050bf3ab70724f45312c5a2580e714a6
[ "MIT" ]
null
null
null
config/rules.py
zhouhongf/spider_news
6ea4a80d050bf3ab70724f45312c5a2580e714a6
[ "MIT" ]
1
2021-12-15T09:11:16.000Z
2021-12-15T09:11:16.000Z
#!/usr/bin/env python from collections import namedtuple from urllib.parse import urlencode, urlparse, urljoin, quote, unquote from myspiders.base import BaseField, Bs4TextField, Bs4HtmlField, Bs4AttrField, Bs4AttrTextField, JsonField, JsonMultiField import re import time from config.target import Target # type_main格式规定为2个中文字 # type_next格式规定为4个中文字 # 新闻,公告,业务,活动 # 新闻:来源本行,来源媒体 # 公告:采购公告,招聘公告,服务公告,其他公告 class Rules: pattern_string = re.compile(r'^((?!(【详情】|【详细】|更多)).)*$') pattern_date = re.compile(r'20[0-9]{2}[-年/][01]?[0-9][-月/][0123]?[0-9]日?') pattern_sina = re.compile(r'https://finance.sina.com.cn/.+/doc-.+\.(shtml|shtm|html|htm)') RULES_NEWS = { Target( bank_name='新浪财经', type_main='新闻', type_next='首页要闻', url='http://finance.sina.com.cn/money/bank/', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': pattern_sina}, string=pattern_string), ] ), Target( bank_name='新浪财经', type_main='新闻', type_next='监管政策', url='http://finance.sina.com.cn/roll/index.d.html?cid=56689&page=1', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': pattern_sina}, string=pattern_string), ] ), Target( bank_name='新浪财经', type_main='新闻', type_next='公司动态', url='http://finance.sina.com.cn/roll/index.d.html?cid=80798&page=1', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': pattern_sina}, string=pattern_string), ] ), Target( bank_name='新浪财经', type_main='新闻', type_next='产品业务', url='http://finance.sina.com.cn/roll/index.d.html?cid=56693&page=1', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': pattern_sina}, string=pattern_string), ] ), Target( bank_name='新浪财经', type_main='新闻', type_next='理财要闻', url='http://finance.sina.com.cn/money/', selectors=[ Bs4HtmlField(attrs={'id': re.compile(r'subShowContent1_news[0-9]')}), Bs4AttrTextField(target='href', name='a', attrs={'href': pattern_sina}, string=pattern_string), ] ), Target( bank_name='新浪财经', type_main='新闻', type_next='理财要闻', url='http://finance.sina.com.cn/money/', selectors=[ Bs4AttrTextField(target='href', css_select='div[id^="subShowContent1_news"] .news-item h2 a[href*="/doc-"]'), ] ), } # 每次仅爬取首页内容 RULES = { Target( bank_name='工商银行', type_main='新闻', type_next='来源本行', url='http://www.icbc.com.cn/icbc/%e5%b7%a5%e8%a1%8c%e9%a3%8e%e8%b2%8c/%e5%b7%a5%e8%a1%8c%e5%bf%ab%e8%ae%af/default.htm', selectors=[ Bs4AttrTextField(target='href', attrs={'class': 'data-collecting-sign textgs'}), Bs4HtmlField(attrs={'id': 'MyFreeTemplateUserControl'}, many=False) ] ), Target( bank_name='中国银行', type_main='新闻', type_next='来源本行', url='https://www.boc.cn/aboutboc/bi1/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='.news ul.list li a'), Bs4HtmlField(css_select='.content.con_area .TRS_Editor', many=False) ] ), Target( bank_name='中国银行', type_main='公告', type_next='其他公告', url='https://www.boc.cn/custserv/bi2/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='.news ul.list li a'), Bs4HtmlField(css_select='.content.con_area .TRS_Editor', many=False) ] ), Target( bank_name='中国银行', type_main='公告', type_next='采购公告', url='https://www.boc.cn/aboutboc/bi6/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='.news ul.list li a'), Bs4HtmlField(css_select='.content.con_area .TRS_Editor', many=False) ] ), Target( bank_name='农业银行', type_main='新闻', type_next='来源本行', url='http://www.abchina.com/cn/AboutABC/nonghzx/NewsCenter/default.htm', selectors=[ Bs4AttrTextField(target='href', css_select='.details_rightC.fl a'), Bs4HtmlField(css_select='.details_right .TRS_Editor', many=False) ] ), 
Target( bank_name='农业银行', type_main='公告', type_next='采购公告', url='http://www.abchina.com/cn/AboutABC/CG/BM/default.htm', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': re.compile(r'\.htm|\.html')}, string=re.compile(r'公告')), Bs4HtmlField(css_select='.content_right_detail .TRS_Editor', many=False) ] ), Target( bank_name='农业银行', type_main='公告', type_next='采购公告', url='http://www.abchina.com/cn/AboutABC/CG/Purchase/default.htm', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': re.compile(r'\.htm|\.html')}, string=re.compile(r'公告')), Bs4HtmlField(css_select='.content_right_detail .TRS_Editor', many=False) ] ), # 建设银行 的还有各省份分行 分支 有待爬取 Target( bank_name='建设银行', type_main='新闻', type_next='来源本行', url='http://www.ccb.com/cn/v3/include/notice/zxgg_1.html', selectors=[ Bs4AttrTextField(target='href', name='a', attrs={'href': re.compile(r'\.htm|\.html'), 'class': 'blue3', 'title': True}), Bs4HtmlField(attrs={'id': 'ti'}, many=False) ] ), Target( bank_name='交通银行', type_main='新闻', type_next='来源本行', url='http://www.bankcomm.com/BankCommSite/shtml/jyjr/cn/7158/7162/list_1.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.main ul.tzzgx-conter.ty-list li a'), Bs4HtmlField(attrs={'class': 'show_main c_content'}, many=False) ] ), Target( bank_name='邮储银行', type_main='新闻', type_next='来源本行', url='http://www.psbc.com/cn/index/syycxw/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#article_1 li.clearfix a'), Bs4HtmlField(attrs={'class': 'news_cont_msg'}, many=False) ] ), Target( bank_name='邮储银行', type_main='公告', type_next='其他公告', url='http://www.psbc.com/cn/index/ggl/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#article_1 li.clearfix a'), Bs4HtmlField(attrs={'class': 'news_cont_msg'}, many=False) ] ), Target( bank_name='中信银行', type_main='新闻', type_next='来源本行', url='http://www.citicbank.com/about/companynews/banknew/message/%s/index.html' % time.strftime('%Y'), selectors=[ Bs4AttrTextField(target='href', css_select='#business ul.dhy_b li a'), Bs4HtmlField(attrs={'class': re.compile(r'TRS_Editor|main_content')}, many=False) ] ), Target( bank_name='中信银行', type_main='新闻', type_next='来源本行', url='http://www.citicbank.com/about/companynews/zxsh/', selectors=[ Bs4AttrTextField(target='href', css_select='#business ul.dhy_b li a'), Bs4HtmlField(attrs={'class': re.compile(r'TRS_Editor|main_content')}, many=False) ] ), Target( bank_name='中信银行', type_main='公告', type_next='服务公告', url='http://www.citicbank.com/common/servicenotice/', selectors=[ Bs4AttrTextField(target='href', css_select='#business ul.dhy_b li a'), Bs4HtmlField(attrs={'class': re.compile(r'TRS_Editor|main_content')}, many=False) ] ), Target( bank_name='招商银行', type_main='新闻', type_next='来源本行', url='http://www.cmbchina.com/cmbinfo/news/', selectors=[ Bs4AttrTextField(target='href', css_select='#column_content span.c_title a'), Bs4HtmlField(attrs={'class': re.compile(r'infodiv|c_content')}, many=False) ] ), Target( bank_name='招商银行', type_main='新闻', type_next='来源本行', url='http://www.cmbchina.com/cmbinfo/news/', selectors=[ Bs4AttrTextField(target='href', css_select='#column_content span.c_title a'), Bs4HtmlField(css_select='#column_content .c_content', many=False) ] ), Target( bank_name='民生银行', type_main='新闻', type_next='来源本行', url='http://www.cmbc.com.cn/jrms/msdt/msxw/index.htm', selectors=[ Bs4AttrTextField(target='href', css_select='li.left_ul520 a'), Bs4HtmlField(css_select='.counter_mid .count_table', many=False) ] ), Target( bank_name='民生银行', type_main='新闻', 
type_next='来源本行', url='http://www.cmbc.com.cn/jrms/msdt/mtgz/index.htm', selectors=[ Bs4AttrTextField(target='href', css_select='li.left_ul520 a'), Bs4HtmlField(css_select='.counter_mid .count_table', many=False) ] ), Target( bank_name='民生银行', type_main='公告', type_next='其他公告', url='http://www.cmbc.com.cn/zdtj/zygg/index.htm', selectors=[ Bs4AttrTextField(target='href', css_select='li.left_ul520 a'), Bs4HtmlField(css_select='.counter_mid_1 .count_one', many=False) ] ), Target( bank_name='民生银行', type_main='新闻', type_next='来源本行', url='http://www.cmbc.com.cn/jrms/msdt/fykyzq/index.htm', selectors=[ Bs4AttrTextField(target='href', css_select='li.left_ul520 a'), Bs4HtmlField(css_select='.counter_mid .count_table', many=False) ] ), # 浦发银行的采购公告是PDF文件格式,后期再添加解析PDF文件的功能 Target( bank_name='浦发银行', type_main='新闻', type_next='来源本行', url='https://news.spdb.com.cn/about_spd/xwdt_1632/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.c_news_body ul li a'), Bs4HtmlField(css_select='.fixed_width .TRS_Editor', many=False) ] ), Target( bank_name='浦发银行', type_main='新闻', type_next='来源本行', url='https://news.spdb.com.cn/about_spd/media/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.c_news_body ul li a'), Bs4HtmlField(css_select='.fixed_width .TRS_Editor', many=False) ] ), Target( bank_name='兴业银行', type_main='新闻', type_next='来源本行', url='https://www.cib.com.cn/cn/aboutCIB/about/news/', selectors=[ Bs4AttrTextField(target='href', css_select='.list-box .middle ul:nth-of-type(2) li a'), Bs4HtmlField(css_select='.detail-box .middle', many=False) ] ), Target( bank_name='兴业银行', type_main='公告', type_next='其他公告', url='https://www.cib.com.cn/cn/aboutCIB/about/notice/', selectors=[ Bs4AttrTextField(target='href', css_select='.list-box .middle ul:nth-of-type(2) li a'), Bs4HtmlField(css_select='.detail-box .middle', many=False) ] ), Target( bank_name='平安银行', type_main='新闻', type_next='来源本行', url='http://bank.pingan.com/ir/gonggao/xinwen/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.span10 ul.list li a'), Bs4HtmlField(css_select='.container .row:last-of-type .span10 .row .span10 .box', many=False) ] ), Target( bank_name='广发银行', type_main='新闻', type_next='来源本行', url='http://www.cgbchina.com.cn/Channel/11625977', selectors=[ Bs4AttrTextField(target='href', css_select='ul.newList li a'), Bs4HtmlField(css_select='#textContent .textContent', many=False) ] ), Target( bank_name='广发银行', type_main='公告', type_next='其他公告', url='http://www.cgbchina.com.cn/Channel/11640277', selectors=[ Bs4AttrTextField(target='href', css_select='ul.newList li a'), Bs4HtmlField(attrs={'id': 'textContent'}, many=False) ] ), Target( bank_name='光大银行', type_main='新闻', type_next='来源本行', url='http://www.cebbank.com/site/ceb/gddt/xnxw52/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#main_con ul.gg_right_ul li a'), Bs4HtmlField(css_select='.gd_xilan .xilan_con', many=False) ] ), Target( bank_name='光大银行', type_main='新闻', type_next='来源本行', url='http://www.cebbank.com/site/ceb/gddt/mtgz/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#main_con ul.gg_right_ul li a'), Bs4HtmlField(css_select='.gd_xilan .xilan_con', many=False) ] ), Target( bank_name='光大银行', type_main='公告', type_next='其他公告', url='http://www.cebbank.com/site/zhpd/zxgg35/gdgg10/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#gg_right ul.gg_right_ul li a'), Bs4HtmlField(css_select='.gd_xilan .xilan_con', many=False) ] ), Target( bank_name='光大银行', type_main='公告', 
type_next='采购公告', url='http://www.cebbank.com/site/zhpd/zxgg35/cggg/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#gg_right ul.gg_right_ul li a'), Bs4HtmlField(css_select='.gd_xilan .xilan_con', many=False) ] ), Target( bank_name='光大银行', type_main='公告', type_next='采购公告', url='http://www.cebbank.com/site/zhpd/zxgg35/cgjggg/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#gg_right ul.gg_right_ul li a'), Bs4HtmlField(css_select='.gd_xilan .xilan_con', many=False) ] ), Target( bank_name='华夏银行', type_main='新闻', type_next='来源本行', url='http://www.hxb.com.cn/jrhx/hxzx/hxxw/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.pro_contlist ul li.pro_contli a'), Bs4HtmlField(attrs={'id': 'content'}, many=False) ] ), Target( bank_name='华夏银行', type_main='公告', type_next='其他公告', url='http://www.hxb.com.cn/jrhx/khfw/zxgg/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.pro_contlist ul li.pro_contli a'), Bs4HtmlField(attrs={'id': 'content'}, many=False) ] ), Target( bank_name='浙商银行', type_main='新闻', type_next='来源本行', url='http://www.czbank.com/cn/pub_info/news/', selectors=[ Bs4AttrTextField(target='href', css_select='#content dd a'), Bs4HtmlField(css_select='.cdv_content .TRS_Editor', many=False) ] ), Target( bank_name='浙商银行', type_main='公告', type_next='其他公告', url='http://www.czbank.com/cn/pub_info/important_notice/', selectors=[ Bs4AttrTextField(target='href', css_select='.list_content dd a'), Bs4HtmlField(css_select='.cdv_content .TRS_Editor', many=False) ] ), Target( bank_name='浙商银行', type_main='新闻', type_next='来源本行', url='http://www.czbank.com/cn/pub_info/Outside_reports/', selectors=[ Bs4AttrTextField(target='href', css_select='.list_content dd a'), Bs4HtmlField(css_select='.cdv_content .TRS_Editor', many=False) ] ), Target( bank_name='恒丰银行', type_main='新闻', type_next='来源本行', url='http://www.hfbank.com.cn/gyhf/hfxw/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='#imgArticleList li h3 a'), Bs4HtmlField(css_select='.infoArticle .articleCon', many=False) ] ), Target( bank_name='恒丰银行', type_main='公告', type_next='其他公告', url='http://www.hfbank.com.cn/gryw/yhgg/index.shtml', selectors=[ Bs4AttrTextField(target='href', css_select='.annWrap li h3 a'), Bs4HtmlField(css_select='.infoArticle .articleCon', many=False) ] ), Target( bank_name='北京银行', type_main='公告', type_next='其他公告', url='http://www.bankofbeijing.com.cn/about/gonggao.html', selectors=[ Bs4AttrTextField( target='href', css_select='#area .content_left ul.sub_news li span a', url_prefix='http://www.bankofbeijing.com.cn/'), Bs4HtmlField(attrs={'id': 'con'}, many=False) ] ), Target( bank_name='贵阳银行', type_main='公告', type_next='其他公告', url='https://www.bankgy.cn/portal/zh_CN/home/news/notice/list.html', selectors=[ Bs4AttrTextField( target='href', css_select='.detailConter .mewlist ul li a[href^="news/notice/2020"]', url_prefix='https://www.bankgy.cn/portal/zh_CN/home/'), Bs4HtmlField(css_select='.detailConter .textConter', many=False) ] ), Target( bank_name='贵阳银行', type_main='新闻', type_next='来源本行', url='https://www.bankgy.cn/portal/zh_CN/home/news/dynamic/list.html', selectors=[ Bs4AttrTextField( target='href', css_select='.detailConter .mewlist ul li a[href^="news/dynamic/2020"]', url_prefix='https://www.bankgy.cn/portal/zh_CN/home/'), Bs4HtmlField(css_select='.detailConter .textConter', many=False) ] ), Target( bank_name='杭州银行', type_main='公告', type_next='其他公告', url='http://www.hzbank.com.cn/hzyh/index/bxgg/index.html', selectors=[ 
Bs4AttrTextField( target='href', css_select='#yc_main .portlet ul.new_list1 li span:first-of-type a[href^="/hzyh/index/bxgg/"]', url_prefix='http://www.hzbank.com.cn/'), Bs4HtmlField(css_select='#easysiteText', many=False) ] ), Target( bank_name='杭州银行', type_main='新闻', type_next='来源本行', url='http://www.hzbank.com.cn/hzyh/index/bxkx/index.html', selectors=[ Bs4AttrTextField( target='href', css_select='#yc_main .portlet ul.new_list1 li span:first-of-type a[href^="/hzyh/index/bxkx/"]', url_prefix='http://www.hzbank.com.cn/'), Bs4HtmlField(css_select='#easysiteText', many=False) ] ), Target( bank_name='江苏银行', type_main='公告', type_next='其他公告', url='http://www.jsbchina.cn/CN/zygg/index.html?flag=0', selectors=[ Bs4AttrTextField( target='href', css_select='#myTab0_Content ul a[href^="/CN/zygg/"]', url_prefix='http://www.jsbchina.cn/'), Bs4HtmlField(css_select='#myTab0_Content0', many=False) ] ), Target( bank_name='江苏银行', type_main='新闻', type_next='来源本行', url='http://www.jsbchina.cn/CN/gywh/ggywh/gwhxx/index.html?flag=3', selectors=[ Bs4AttrTextField( target='href', css_select='#myTab0_Content ul a[href^="/CN/gywh/ggywh/gwhxx/"]', url_prefix='http://www.jsbchina.cn/'), Bs4HtmlField(css_select='#myTab0_Content0', many=False) ] ), Target( bank_name='江苏银行', type_main='新闻', type_next='来源媒体', url='http://www.jsbchina.cn/CN/gywh/ggywh/gmtgz/index.html?flag=1', selectors=[ Bs4AttrTextField( target='href', css_select='#myTab0_Content ul a[href^="/CN/gywh/ggywh/gmtgz/"]', url_prefix='http://www.jsbchina.cn/'), Bs4HtmlField(css_select='#myTab0_Content0', many=False) ] ), Target( bank_name='南京银行', type_main='公告', type_next='其他公告', url='http://www.njcb.com.cn/njcb/index/_301021/index.html', selectors=[ Bs4AttrTextField( target='href', css_select='#right_lib .erjilib_mk .erjilib_mkcon .erji_libcon .erji_lib p.erji_libtit a', url_prefix='http://www.njcb.com.cn'), Bs4HtmlField(css_select='#news_content', many=False) ] ), Target( bank_name='南京银行', type_main='新闻', type_next='来源本行', url='http://www.njcb.com.cn/njcb/gywx/xwzx/index.html', selectors=[ Bs4AttrTextField( target='href', css_select='#right_lib .erjilib_mk .erjilib_mkcon .erji_libcon .erji_lib p.erji_libtit a', url_prefix='http://www.njcb.com.cn'), Bs4HtmlField(css_select='#news_content', many=False) ] ), Target( bank_name='南京银行', type_main='新闻', type_next='来源媒体', url='http://www.njcb.com.cn/njcb/gywx/_300910/index.html', selectors=[ Bs4AttrTextField( target='href', css_select='#right_lib .erjilib_mk .erjilib_mkcon .erji_libcon .erji_lib p.erji_libtit a', url_prefix='http://www.njcb.com.cn'), Bs4HtmlField(css_select='#news_content', many=False) ] ), Target( bank_name='宁波银行', type_main='公告', type_next='其他公告', url='http://www.nbcb.com.cn/home/important_notices/', selectors=[ Bs4AttrTextField( target='href', css_select='#ul_list li a', url_prefix='http://www.nbcb.com.cn/home/important_notices/'), Bs4HtmlField(css_select='#cms_wrapper .cms_cont', many=False) ] ), Target( bank_name='宁波银行', type_main='新闻', type_next='来源本行', url='http://www.nbcb.com.cn/investor_relations/internal_news/', selectors=[ Bs4AttrTextField( target='href', css_select='#ul_list li a', url_prefix='http://www.nbcb.com.cn/investor_relations/internal_news/'), Bs4HtmlField(css_select='#cms_wrapper .cms_cont', many=False) ] ), Target( bank_name='宁波银行', type_main='新闻', type_next='来源媒体', url='http://www.nbcb.com.cn/investor_relations/media_release/', selectors=[ Bs4AttrTextField( target='href', css_select='#ul_list li a', url_prefix='http://www.nbcb.com.cn/investor_relations/media_release/'), 
Bs4HtmlField(css_select='#cms_wrapper .cms_cont', many=False) ] ), Target( bank_name='青岛银行', type_main='新闻', type_next='来源媒体', url='http://www.qdccb.com/qyxx/xnxx/mtkwx/index.shtml', selectors=[ Bs4AttrTextField( target='href', css_select='.tableList tr td:nth-of-type(2) a[href^="/qyxx/xnxx/mtkwx/"]', url_prefix='http://www.qdccb.com/'), Bs4HtmlField(css_select='.main .second_right .second_rightContent', many=False) ] ), Target( bank_name='上海银行', type_main='公告', type_next='其他公告', url='http://www.bosc.cn/zh/sy/sy_zxgg/index.shtml', selectors=[ Bs4AttrTextField( target='href', css_select='ul.dzyhdt a[href^="/zh/sy/sy_zxgg/"]', url_prefix='http://www.bosc.cn/'), Bs4HtmlField(css_select='div.fr.w706 > div.m_smaBor.mt15 > div > div', many=False) ] ), Target( bank_name='上海银行', type_main='新闻', type_next='来源本行', url='http://www.bosc.cn/zh/sy/sy_sykx/index.shtml', selectors=[ Bs4AttrTextField( target='href', css_select='ul.dzyhdt a[href^="/zh/sy/sy_sykx/"]', url_prefix='http://www.bosc.cn/'), Bs4HtmlField(css_select='div.fr.w706 > div.m_smaBor.mt15 > div > div', many=False) ] ), Target( bank_name='苏州银行', type_main='公告', type_next='其他公告', url='http://www.suzhoubank.com/icms/static/szbank/zh/0d6u9hrt/yvngsgtq/7d99gtx7/x1nwoo1t/pageInfo.txt', selectors=[ JsonMultiField(json_select='url=title=date', url_prefix='http://www.suzhoubank.com/icms/'), Bs4HtmlField(css_select='div.index-main > div.wp > div.fr.right > div.huodong > div.dzyh_main', many=False) ] ), Target( bank_name='苏州银行', type_main='新闻', type_next='来源本行', url='http://www.suzhoubank.com/icms/static/szbank/zh/0d6u9hrt/yvngsgtq/7d99gtx7/46idcb1g/pageInfo.txt', selectors=[ JsonMultiField(json_select='url=title=date', url_prefix='http://www.suzhoubank.com/icms/'), Bs4HtmlField(css_select='div.index-main > div.wp > div.fr.right > div.huodong > div.dzyh_main', many=False) ] ), Target( bank_name='西安银行', type_main='新闻', type_next='来源本行', url='https://www.xacbank.com/icms/static/xacbank2019/zh/jvsxapz8/jo1txlfm/y2zpei09/pageInfo.txt?t=%s' % int(time.time() * 1000), selectors=[ JsonMultiField(json_select='url=title=date', url_prefix='https://www.xacbank.com/icms/'), Bs4HtmlField(css_select='#text_css', many=False) ] ), Target( bank_name='长沙银行', type_main='公告', type_next='采购公告', url='http://www.cscb.cn/home_noticeInfo_bidding_page1.html', selectors=[ Bs4AttrTextField(target='href', css_select='.box_cont .body_area .jd_list ul li a', url_prefix='http://www.cscb.cn/'), Bs4HtmlField(css_select='.body_area .img_wd_lie .sxd_sq', many=False) ] ), Target( bank_name='长沙银行', type_main='公告', type_next='服务公告', url='http://www.cscb.cn/home_noticeInfo_announcement_page1.html', selectors=[ Bs4AttrTextField(target='href', css_select='.box_cont .body_area .jd_list ul li a', url_prefix='http://www.cscb.cn/'), Bs4HtmlField(css_select='.body_area .img_wd_lie .sxd_sq', many=False) ] ), Target( bank_name='长沙银行', type_main='新闻', type_next='来源本行', url='http://www.cscb.cn/home_news_page1.html', selectors=[ Bs4AttrTextField(target='href', css_select='.box_cont .body_area .jd_list ul li a', url_prefix='http://www.cscb.cn/'), Bs4HtmlField(css_select='.body_area .img_wd_lie .sxd_sq', many=False) ] ), Target( bank_name='郑州银行', type_main='公告', type_next='其他公告', url='http://www.zzbank.cn/about/zxdt/zxgg/', selectors=[ Bs4AttrTextField(target='href', css_select='.main_right .message_list .paginationBar ul li a', url_prefix='http://www.zzbank.cn/about/zxdt/zxgg/'), Bs4HtmlField(css_select='.main_right .message_list .TRS_Editor', many=False) ] ), Target( bank_name='郑州银行', type_main='新闻', 
type_next='来源本行', url='http://www.zzbank.cn/about/zxdt/gsdt/', selectors=[ Bs4AttrTextField(target='href', css_select='.main_right .message_list .paginationBar ul li a', url_prefix='http://www.zzbank.cn/about/zxdt/gsdt/'), Bs4HtmlField(css_select='.main_right .message_list .TRS_Editor', many=False) ] ), Target( bank_name='郑州银行', type_main='新闻', type_next='来源媒体', url='http://www.zzbank.cn/about/zxdt/mtbg/', selectors=[ Bs4AttrTextField(target='href', css_select='.main_right .message_list .paginationBar ul li a', url_prefix='http://www.zzbank.cn/about/zxdt/mtbg/'), Bs4HtmlField(css_select='.main_right .message_list .TRS_Editor', many=False) ] ), Target( bank_name='常熟银行', type_main='公告', type_next='采购公告', url='http://www.csrcbank.com/yw/cgxx/cggg/', selectors=[ Bs4AttrTextField(target='href', css_select='ul#data li a', url_prefix='http://www.csrcbank.com/yw/cgxx/cggg/'), Bs4HtmlField(css_select='#tabstyle .TRS_Editor', many=False) ] ), Target( bank_name='常熟银行', type_main='公告', type_next='其他公告', url='http://www.csrcbank.com/tb/tzgg/', selectors=[ Bs4AttrTextField(target='href', css_select='ul#data li a', url_prefix='http://www.csrcbank.com/tb/tzgg/'), Bs4HtmlField(css_select='#tabstyle .TRS_Editor', many=False) ] ), Target( bank_name='江阴银行', type_main='公告', type_next='其他公告', url='http://www.jybank.com.cn/jybank/index/zygg/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#text .recruitment-con ul li a', url_prefix='http://www.jybank.com.cn/'), Bs4HtmlField(css_select='.news .news_con', many=False) ] ), Target( bank_name='江阴银行', type_main='新闻', type_next='来源本行', url='http://www.jybank.com.cn/jybank/index/xyxw/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#text .recruitment-con ul li a', url_prefix='http://www.jybank.com.cn/'), Bs4HtmlField(css_select='.news .news_con', many=False) ] ), Target( bank_name='青农银行', type_main='公告', type_next='其他公告', url='http://www.qrcb.com.cn/qrcbcms/html/zdgg/', selectors=[ Bs4AttrTextField(target='href', css_select='.main_right .box4 ul li .tit2 a:last-of-type', url_prefix='http://www.qrcb.com.cn/'), Bs4HtmlField(css_select='.main_right .box5 .zhengwen_con1', many=False) ] ), Target( bank_name='苏农银行', type_main='公告', type_next='其他公告', url='http://www.szrcb.com/wjrcb/gdgg/xwgg/index.html?v=%s' % int(time.time() * 1000), selectors=[ Bs4AttrTextField(target='href', css_select='ul li a[href^="/wjrcb/gdgg/xwgg/"]', url_prefix='http://www.szrcb.com/'), Bs4HtmlField(css_select='.bg-ff .main-lc .lccpbox .lbright .ggpad', many=False) ] ), Target( bank_name='苏农银行', type_main='新闻', type_next='来源本行', url='http://www.szrcb.com/wjrcb/gdgg/wjxw/index.html?v=%s' % int(time.time() * 1000), selectors=[ Bs4AttrTextField(target='href', css_select='ul li a[href^="/wjrcb/gdgg/wjxw/"]', url_prefix='http://www.szrcb.com/'), Bs4HtmlField(css_select='.bg-ff .main-lc .lccpbox .lbright .ggpad', many=False) ] ), Target( bank_name='无锡银行', type_main='公告', type_next='其他公告', url='http://www.wrcb.com.cn/website/homepage/bank_notice_news_list/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#frmList .yhkyw_box .content_p table tr td:first-of-type a', url_prefix='http://www.wrcb.com.cn/'), Bs4HtmlField(css_select='.main_r .yhkyw_box', many=False) ] ), Target( bank_name='无锡银行', type_main='新闻', type_next='来源本行', url='http://www.wrcb.com.cn/website/homepage/bank_news_list/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='#frmList .yhkyw_box .content_p table tr td:first-of-type a', url_prefix='http://www.wrcb.com.cn/'), 
Bs4HtmlField(css_select='.main_r .yhkyw_box', many=False) ] ), Target( bank_name='张家港行', type_main='公告', type_next='其他公告', url='http://www.zrcbank.com/', selectors=[ Bs4AttrField(target='onclick', name='div', attrs={'onclick': re.compile(r'window\.location\.href=')}, string='重要公告', callback='parse_zrcbank', many=False), Bs4AttrTextField(target='href', css_select='#divdemo ul li a', url_prefix='http://www.zrcbank.com/'), Bs4HtmlField(css_select='#divdemo', many=False) ] ), Target( bank_name='张家港行', type_main='新闻', type_next='来源本行', url='http://www.zrcbank.com/', selectors=[ Bs4AttrField(target='onclick', name='div', attrs={'onclick': re.compile(r'window\.location\.href=')}, string='银行新闻', callback='parse_zrcbank', many=False), Bs4AttrTextField(target='href', css_select='#divdemo ul li a', url_prefix='http://www.zrcbank.com/'), Bs4HtmlField(css_select='#divdemo', many=False) ] ), Target( bank_name='紫金银行', type_main='公告', type_next='其他公告', url='http://www.zjrcbank.com/zygg/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='.RightSidebar .mianContent ul li a', url_prefix='http://www.zjrcbank.com/'), Bs4HtmlField(css_select='.RightSidebar .mianContent .News_Info .txt_info', many=False) ] ), Target( bank_name='紫金银行', type_main='公告', type_next='采购公告', url='http://www.zjrcbank.com/ytcggg/index.html', selectors=[ Bs4AttrTextField(target='href', css_select='.RightSidebar .mianContent ul li a', url_prefix='http://www.zjrcbank.com/'), Bs4HtmlField(css_select='.RightSidebar .mianContent .News_Info .txt_info', many=False) ] ), }
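Each Target above pairs a listing-page link selector with a detail-page content selector. A minimal sketch of how one such entry could be consumed, assuming only requests and BeautifulSoup; Target, Bs4AttrTextField, and Bs4HtmlField are this project's own classes, and fetch_bank_items below is a hypothetical stand-in for the real pipeline:

# Hypothetical consumer for one Target entry: collect article links from the
# listing page, then pull the first matching content block from each detail page.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

def fetch_bank_items(list_url, link_css, content_css, url_prefix=None, timeout=10):
    """Yield (article_url, content_html) for each link found on one listing page."""
    listing = BeautifulSoup(requests.get(list_url, timeout=timeout).text, 'html.parser')
    for a in listing.select(link_css):
        href = a.get('href')
        if not href:
            continue
        # Assumed to mirror the url_prefix behaviour of Bs4AttrTextField:
        # relative links resolve against the prefix (or the listing URL itself).
        article_url = urljoin(url_prefix or list_url, href)
        detail = BeautifulSoup(requests.get(article_url, timeout=timeout).text, 'html.parser')
        node = detail.select_one(content_css)  # many=False -> first match only
        if node is not None:
            yield article_url, str(node)

# Example with the 兴业银行 news target defined above:
# for url, html in fetch_bank_items(
#         'https://www.cib.com.cn/cn/aboutCIB/about/news/',
#         '.list-box .middle ul:nth-of-type(2) li a',
#         '.detail-box .middle'):
#     print(url, len(html))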
39.517803
140
0.499382
3,999
38,846
4.677169
0.11978
0.065922
0.062874
0.145958
0.894621
0.882004
0.866232
0.833779
0.787265
0.690334
0
0.015592
0.359394
38,846
982
141
39.558045
0.736026
0.004479
0
0.745493
0
0.022269
0.29364
0.016553
0
0
0
0
0
1
0
false
0
0.009544
0
0.015907
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
87d42f7cf20bbeb58b97696c7c30d439f620da4e
4,668
py
Python
tests/hash.py
ChenghaoMou/embeddings
e63c2f2f4a688302de37bb8ccfd37a0170e2c374
[ "MIT" ]
12
2021-04-18T02:32:55.000Z
2021-12-19T13:49:23.000Z
tests/hash.py
ChenghaoMou/embeddings
e63c2f2f4a688302de37bb8ccfd37a0170e2c374
[ "MIT" ]
1
2021-07-04T09:06:34.000Z
2021-07-25T03:45:43.000Z
tests/hash.py
ChenghaoMou/embeddings
e63c2f2f4a688302de37bb8ccfd37a0170e2c374
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2021-04-17 08:29:54 # @Author : Chenghao Mou (mouchenghao@gmail.com) import pytest from text_embeddings.hash import CANINETokenizer, PQRNNTokenizer from transformers.tokenization_utils_base import PaddingStrategy @pytest.mark.parametrize( ( "text_pair", "add_special_tokens", "stride", "padding", "truncation", "return_attention_mask", "return_special_tokens_mask", "return_length", ), [ (True, True, 5, "longest", "longest_first", True, True, True), (True, True, 5, "longest", "longest_first", True, True, False), (True, True, 5, "longest", "longest_first", True, False, True), (True, True, 5, "longest", "longest_first", False, True, True), (True, False, 5, "longest", "longest_first", True, False, True), (False, False, 5, "longest", "longest_first", True, False, True), ], ) def test_canine_tokenizer( text_pair: bool, add_special_tokens: bool, stride: int, padding, truncation, return_attention_mask, return_special_tokens_mask, return_length, ): data = [ "Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world!", "Hóla!", "你好,世界!", ] embedder = CANINETokenizer(hash_size=768, max_length=2048) results = embedder( text=data, text_pair=data if text_pair else None, add_special_tokens=add_special_tokens, stride=stride, padding=padding, return_tensors="pt", truncation=truncation, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, prepend_batch_axis=True, return_overflowing_tokens=False, ) sequence_length = results["input_ids"].shape[1] assert sequence_length <= embedder.max_length if return_special_tokens_mask and add_special_tokens: assert results["special_tokens_mask"].shape == (3, sequence_length) assert results["input_ids"].shape == ( 3, sequence_length, 768, ) # height is slightly different because of the font if return_length: assert results["length"].shape == (3,) @pytest.mark.parametrize( ( "text_pair", "add_special_tokens", "stride", "padding", "truncation", "return_attention_mask", "return_special_tokens_mask", "return_length", ), [ (True, True, 5, PaddingStrategy.LONGEST, "longest_first", True, True, True), (True, True, 5, PaddingStrategy.LONGEST, "longest_first", True, True, False), (True, True, 5, PaddingStrategy.LONGEST, "longest_first", True, False, True), (True, True, 5, PaddingStrategy.LONGEST, "longest_first", False, True, True), (True, False, 5, PaddingStrategy.LONGEST, "longest_first", True, False, True), (False, False, 5, PaddingStrategy.LONGEST, "longest_first", True, False, True), ], ) def test_pqrnn_tokenizer( text_pair: bool, add_special_tokens: bool, stride: int, padding, truncation, return_attention_mask, return_special_tokens_mask, return_length, ): data = [ "Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! Hello world! 
Hello world!", "Hóla!", "你好,世界!", ] embedder = PQRNNTokenizer(hash_size=768, max_length=512) results = embedder( text=data, text_pair=data if text_pair else None, add_special_tokens=add_special_tokens, stride=stride, padding=padding, return_tensors="pt", truncation=truncation, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, prepend_batch_axis=True, return_overflowing_tokens=False, ) sequence_length = results["input_ids"].shape[1] assert sequence_length <= embedder.max_length if return_special_tokens_mask and add_special_tokens: assert results["special_tokens_mask"].shape == (3, sequence_length) assert results["input_ids"].shape == ( 3, sequence_length, 768, ) # height is slightly different because of the font if return_length: assert results["length"].shape == (3,) assert results["token_type_ids"].shape == (3, sequence_length)
31.540541
166
0.647815
537
4,668
5.374302
0.182495
0.08316
0.114345
0.15246
0.904712
0.882883
0.882883
0.882883
0.856549
0.68122
0
0.015532
0.241431
4,668
147
167
31.755102
0.799492
0.046487
0
0.746032
0
0.015873
0.192126
0.021147
0
0
0
0
0.071429
1
0.015873
false
0
0.02381
0
0.039683
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
87fd0228ad9805111217954f15a0bc7d2af88b2c
48,262
py
Python
SATModelCount/fireworks/fireworks_speed_test_heatmap.py
jkuck/XORModelCount
1476021ce1d36d36fa3f7b214845a6c7f35a8640
[ "MIT" ]
null
null
null
SATModelCount/fireworks/fireworks_speed_test_heatmap.py
jkuck/XORModelCount
1476021ce1d36d36fa3f7b214845a6c7f35a8640
[ "MIT" ]
null
null
null
SATModelCount/fireworks/fireworks_speed_test_heatmap.py
jkuck/XORModelCount
1476021ce1d36d36fa3f7b214845a6c7f35a8640
[ "MIT" ]
null
null
null
from __future__ import division from sat import SAT import time import os import math from fireworks import Firework, Workflow, FWorker, LaunchPad from fireworks.utilities.fw_utilities import explicit_serialize from fireworks.core.firework import FWAction, FireTaskBase #True: run locally #False: run remotely on cluster TEST_LOCAL = False if TEST_LOCAL: from fireworks.core.rocket_launcher import rapidfire else: from fireworks.queue.queue_launcher import rapidfire from fireworks.user_objects.queue_adapters.common_adapter import CommonAdapter from fw_tutorials.dynamic_wf.fibadd_task import FibonacciAdderTask from cluster_config import HOME_DIRECTORY, MONGODB_USERNAME, MONGODB_PASSWORD from experiment_config import MONGODB_HOST, MONGODB_PORT, MONGODB_NAME import numpy as np # Add the following line to the file ~/.bashrc.user on Atlas: # export PYTHONPATH="/atlas/u/jkuck/XORModelCount/SATModelCount:$PYTHONPATH" # $ source ~/.bashrc.user # $ export PATH=/opt/rh/python27/root/usr/bin:$PATH # $ export LD_LIBRARY_PATH=/opt/rh/python27/root/usr/lib64/:$LD_LIBRARY_PATH # $ PACKAGE_DIR=/atlas/u/jkuck/software # $ export PATH=$PACKAGE_DIR/anaconda2/bin:$PATH # $ export LD_LIBRARY_PATH=$PACKAGE_DIR/anaconda2/local:$LD_LIBRARY_PATH # $ source activate anaconda_venv # $ cd /atlas/u/jkuck/XORModelCount/SATModelCount/fireworks # $ python fireworks_speed_test_heatmap.py NJOBS_QUEUE = 130 #used for RunExperimentBatch MAX_TIME = 360 #max time to run a single SAT problem #used for RunSpecificExperimentBatch MAX_TIMEOUT_MULTIPLE = 100 #run at max MAX_TIMEOUT_MULTIPLE*unperturbed runtime m_ranges = {#'c432.isc': range(25, 42), #log_2(Z) = 36.1 'c432.isc': range(25, 46), #log_2(Z) = 36.1 'c499.isc': range(30, 51), #log_2(Z) = 41.0 'c880.isc': range(50, 71), #log_2(Z) = 60.0 'c1355.isc': range(30, 51), #log_2(Z) = 41.0 'c1908.isc': range(20, 44), #log_2(Z) = 33.0 'c2670.isc': range(220, 265), #log_2(Z) = 233 'sat-grid-pbl-0010.cnf': range(65, 95), #log_2(Z) = 78.9 'sat-grid-pbl-0015.cnf': range(170, 210), #log_2(Z) = 180.9 'sat-grid-pbl-0020.cnf': range(310, 350), #log_2(Z) = 318 'ra.cnf': range(920, 1000), #log_2(Z) = 951.0 'tire-1.cnf': range(20, 40), #log_2(Z) = 29.4 #range(27, 32), #range(20, 40), 'tire-2.cnf': range(30, 55), #log_2(Z) = 39.4 #range(27, 32), #range(20, 40), 'tire-3.cnf': range(25, 55), #log_2(Z) = 37.7 #range(27, 32), #range(20, 40), 'tire-4.cnf': range(35, 60), #log_2(Z) = 46.6 #range(27, 32), #range(20, 40), 'log-1.cnf': range(60, 85), #log_2(Z) = 69.0 'log-2.cnf': range(30, 45), #log_2(Z) = 34.9 'lang12.cnf': range(10, 26), #log_2(Z) = 'hypercube.cnf': range(80, 100), #log_2(Z) = 90 'hypercube1.cnf': range(40, 60), #log_2(Z) = 50 'hypercube2.cnf': range(1, 20), #log_2(Z) = 10 'hypercube3.cnf': range(1, 30), #log_2(Z) = 10 'hypercube4.cnf': range(10, 40), #log_2(Z) = 20 'hypercube5.cnf': range(40, 70), #log_2(Z) = 50 'hypercube6.cnf': range(90, 120), #log_2(Z) = 100 'hypercube7.cnf': range(490, 530), #log_2(Z) = 500 } if TEST_LOCAL: f_ranges = {'c432.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], #'c432.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 7)], #'c432.isc': [.0001, .001], 'c499.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'lang12.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'c880.isc': [i/1000.0 for i in range(3,10)] + [i/100.0 for i in range(1, 50)], 'c1355.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'c1908.isc': [i/1000.0 for i in range(1,10)] + 
[i/100.0 for i in range(1, 50)], 'c2670.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'sat-grid-pbl-0010.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'sat-grid-pbl-0015.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'sat-grid-pbl-0020.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'ra.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'tire-1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'tire-2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'tire-3.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'tire-4.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'log-1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'log-2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube3.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube4.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube5.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube6.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], 'hypercube7.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 50)], } else: f_ranges = {'c432.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], #'c432.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 7)], #'c432.isc': [.0001, .001], 'c499.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'lang12.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'c880.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'c1355.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'c1908.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'c2670.isc': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'sat-grid-pbl-0010.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'sat-grid-pbl-0015.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'sat-grid-pbl-0020.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'ra.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'tire-1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'tire-2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'tire-3.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'tire-4.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'log-1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'log-2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube1.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube2.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube3.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube4.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for 
i in range(1, 10)], 'hypercube5.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube6.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], 'hypercube7.cnf': [i/1000.0 for i in range(1,10)] + [i/100.0 for i in range(1, 10)], } #logger = open('heatmap_result_moreModels2/speed=%d.txt' % (m), "w") @explicit_serialize class RunExperimentBatch(FireTaskBase): def run_task(self, fw_spec): RESULTS_DIRECTORY = '/atlas/u/jkuck/XORModelCount/SATModelCount/fireworks/heatmap_result_fireworksK3' if not os.path.exists(RESULTS_DIRECTORY): os.makedirs(RESULTS_DIRECTORY) #for dup_copies in [0, 1, 3, 7, 15, 31, 63, 127]: for dup_copies in [0]: #for dup_copies in [7, 15, 31, 63, 127]: # for f in [f*(dup_copies+1) for f in f_ranges[fw_spec['problem_name']]]: for f in f_ranges[fw_spec['problem_name']]: # for m in m_ranges[fw_spec['problem_name']]: for m in [m*(dup_copies+1) for m in m_ranges[fw_spec['problem_name']]]: for repeat in range(fw_spec['repeats']): print fw_spec['problem_name'], 'dup_copies=', dup_copies RUN_BLOCK_DIAG = True if RUN_BLOCK_DIAG: #block diagonal, deterministic 1's on block filename = '%s/blockDiagDeterministic_speed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_regular_constraints_constantF_permuted(m=m, f=f, f_block=1.0, permute=False, k=3) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) #block diagonal, sample 1's on block w/ probability 1-f filename = '%s/blockDiag1MinusF_speed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_regular_constraints_constantF_permuted(m=m, f=f, f_block=1.0-f, permute=False, k=3) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) RUN_PERMUTED_BLOCK_DIAG = True if RUN_PERMUTED_BLOCK_DIAG: #permuted block diagonal, deterministic 1's on block filename = '%s/permutedBlockDiagDeterministic_speed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = 
SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_regular_constraints_constantF_permuted(m=m, f=f, f_block=1.0, permute=True, k=3) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) #permuted block diagonal, sample 1's on block w/ probability 1-f filename = '%s/permutedBlockDiag1MinusF_speed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_regular_constraints_constantF_permuted(m=m, f=f, f_block=1.0-f, permute=True, k=3) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) RUN_PERMUTATION = False if RUN_PERMUTATION: #permutation filename = '%s/pspeed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_permutation_constraints(m, f) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) #original filename = '%s/speed_REPEATS=%d_%s_duplicates=%d_expIdx=%d.txt'%(RESULTS_DIRECTORY, fw_spec['repeats'], fw_spec['problem_name'].split('.')[0], dup_copies, fw_spec['experiment_idx']) if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not logger = open(filename, append_write) sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) sat.add_parity_constraints(m, f) # sat.add_regular_constraints(m, f) # sat.add_permutation_constraints(m, f) start_time = time.time() (outcome, empirical_density) = sat.solve(MAX_TIME) elapsed_time = time.time() - start_time logger.write("f %f time %f m %d solution %s\n" % (f, elapsed_time, m, outcome)) logger.close() print("f = %.3f, time=%.2f, m=%d, solution=%s" % (f, elapsed_time, m, outcome)) #logger = open('heatmap_result_moreModels2/speed=%d.txt' % (m), "w") @explicit_serialize class RunSpecificExperimentBatch(FireTaskBase): def run_task(self, fw_spec): RESULTS_DIRECTORY = 
'/atlas/u/jkuck/XORModelCount/SATModelCount/fireworks/slurm_postUAI1/specific_MF_vals/%s' % fw_spec['problem_name'].split('.')[0] if not os.path.exists(RESULTS_DIRECTORY): os.makedirs(RESULTS_DIRECTORY) filename = '%s/f_block=%s_permute=%s_k=%s_allOnesConstraint=%s_adjustF=%s_changeVars=%s_useMIS=%s_REPEATS=%d_expIdx=%d.txt'%\ (RESULTS_DIRECTORY, fw_spec['f_block'], fw_spec['permute'], fw_spec['k'], fw_spec['ADD_CONSTRAINT_ALL_ONES'],\ fw_spec['adjust_f'], fw_spec['change_var_names'], fw_spec['use_MIS'], fw_spec['repeats'], fw_spec['experiment_idx']) if TEST_LOCAL: REPEATS = 1 else: REPEATS = 100 all_times = [] for i in range(REPEATS): sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) (outcome, empirical_density) = sat.solve(3600) elapsed_time = outcome[1] all_times.append(elapsed_time) assert(empirical_density == 0), empirical_density logger = open(filename, 'w') logger.write('mean_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.mean(all_times))) logger.write('median_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.median(all_times))) logger.write('max_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.max(all_times))) logger.write('min_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.min(all_times))) logger.write("MAX_TIMEOUT_MULTIPLE= %d\n" % MAX_TIMEOUT_MULTIPLE) logger.close() unperturbed_runtime = np.median(all_times) #for dup_copies in [0, 1, 3, 7, 15, 31, 63, 127]: for dup_copies in [0]: #for dup_copies in [7, 15, 31, 63, 127]: # for f in [f*(dup_copies+1) for f in f_ranges[fw_spec['problem_name']]]: for f in f_ranges[fw_spec['problem_name']]: # for m in m_ranges[fw_spec['problem_name']]: quit_m_early = False last_m_val = -1 for m in [m*(dup_copies+1) for m in m_ranges[fw_spec['problem_name']]]: #compute f such that any construction has the density specified by f, not probability of flipping specified by f if fw_spec['adjust_f'] == True: sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) if fw_spec['use_MIS'] == True: variable_MIS = get_variable_subset(fw_spec['problem_name']) #minimal independent support https://link.springer.com/content/pdf/10.1007%2Fs10601-015-9204-z.pdf N = len(variable_MIS) else: N = sat.n if fw_spec['ADD_CONSTRAINT_ALL_ONES']: m_effective = m - 1 else: m_effective = m if fw_spec['k']==None: cur_k = N/m elif fw_spec['k'] == 'maxConstant': cur_k = np.floor(N/m) else: cur_k = fw_spec['k'] k_density = cur_k/N print 'N=', N, 'm=', m, "fw_spec['k']=", fw_spec['k'], 'k_density=', k_density #compute the density of ones from k: if k_density > f: #we need to decrease k cur_k = np.floor(f*N) print "changed cur_k=", cur_k if fw_spec['f_block'] == '1minusF': f_prime = (f*N - cur_k)/(N - 2*cur_k) print 'f_prime=', f_prime assert(abs((1 - f_prime)*cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: assert(fw_spec['f_block'] == '1') f_prime = (f*N - cur_k)/(N - cur_k) print 'f_prime=', f_prime assert(abs(cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: f_prime = f failures = 0 for repeat in range(fw_spec['repeats']): logger = open(filename, 'a') sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) if fw_spec['f_block'] == '1': sat.add_regular_constraints_constantF_permuted(m=m, 
f=f_prime, f_block=1.0, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], change_var_names=fw_spec['change_var_names']) else: assert(fw_spec['f_block'] == '1minusF') sat.add_regular_constraints_constantF_permuted(m=m, f=f_prime, f_block=1.0-f_prime, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], change_var_names=fw_spec['change_var_names']) start_time = time.time() (outcome, empirical_density) = sat.solve(unperturbed_runtime*MAX_TIMEOUT_MULTIPLE) elapsed_time = time.time() - start_time if outcome == None: failures += 1 logger.write("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density %f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) logger.close() print("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density %f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) if failures == fw_spec['repeats']: quit_m_early = True last_m_val = m break if quit_m_early: #start with largest m and iterate down for m in reversed([m*(dup_copies+1) for m in m_ranges[fw_spec['problem_name']]]): if m <= last_m_val: break #compute f such that any construction has the density specified by f, not probability of flipping specified by f if fw_spec['adjust_f'] == True: sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) N = sat.n if fw_spec['ADD_CONSTRAINT_ALL_ONES']: m_effective = m - 1 else: m_effective = m if fw_spec['k']==None: cur_k = N/m elif fw_spec['k'] == 'maxConstant': cur_k = np.floor(N/m) else: cur_k = fw_spec['k'] k_density = cur_k/N print 'N=', N, 'm=', m, "fw_spec['k']=", fw_spec['k'], 'k_density=', k_density #compute the density of ones from k: if k_density > f: #we need to decrease k cur_k = np.floor(f*N) print "changed cur_k=", cur_k if fw_spec['f_block'] == '1minusF': f_prime = (f*N - cur_k)/(N - 2*cur_k) print 'f_prime=', f_prime assert(abs((1 - f_prime)*cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: assert(fw_spec['f_block'] == '1') f_prime = (f*N - cur_k)/(N - cur_k) print 'f_prime=', f_prime assert(abs(cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: f_prime = f failures = 0 for repeat in range(fw_spec['repeats']): logger = open(filename, 'a') sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) if fw_spec['f_block'] == '1': sat.add_regular_constraints_constantF_permuted(m=m, f=f_prime, f_block=1.0, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], change_var_names=fw_spec['change_var_names']) else: assert(fw_spec['f_block'] == '1minusF') sat.add_regular_constraints_constantF_permuted(m=m, f=f_prime, f_block=1.0-f_prime, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], change_var_names=fw_spec['change_var_names']) start_time = time.time() (outcome, empirical_density) = sat.solve(unperturbed_runtime*MAX_TIMEOUT_MULTIPLE) elapsed_time = time.time() - start_time if outcome == None: failures += 1 logger.write("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density %f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) logger.close() print("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density 
%f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) if failures == fw_spec['repeats']: break @explicit_serialize class RunSpecificMFValsExperimentBatch(FireTaskBase): def run_task(self, fw_spec): RESULTS_DIRECTORY = '/atlas/u/jkuck/XORModelCount/SATModelCount/fireworks/slurm_postUAI1/specific_MF_vals/%s' % fw_spec['problem_name'].split('.')[0] if not os.path.exists(RESULTS_DIRECTORY): os.makedirs(RESULTS_DIRECTORY) filename = '%s/f_block=%s_permute=%s_k=%s_allOnesConstraint=%s_adjustF=%s_changeVars=%s_REPEATS=%d_expIdx=%d.txt'%\ (RESULTS_DIRECTORY, fw_spec['f_block'], fw_spec['permute'], fw_spec['k'], fw_spec['ADD_CONSTRAINT_ALL_ONES'],\ fw_spec['adjust_f'], fw_spec['change_var_names'], fw_spec['repeats'], fw_spec['experiment_idx']) if TEST_LOCAL: REPEATS = 1 else: REPEATS = 100 all_times = [] for i in range(REPEATS): sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) (outcome, empirical_density) = sat.solve(3600) elapsed_time = outcome[1] all_times.append(elapsed_time) assert(empirical_density == 0), empirical_density logger = open(filename, 'w') logger.write('mean_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.mean(all_times))) logger.write('median_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.median(all_times))) logger.write('max_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.max(all_times))) logger.write('min_unperturbed_run_time_%f_trials: %f\n' % (REPEATS, np.min(all_times))) logger.write("MAX_TIMEOUT_MULTIPLE= %d\n" % MAX_TIMEOUT_MULTIPLE) logger.close() unperturbed_runtime = np.median(all_times) #for dup_copies in [0, 1, 3, 7, 15, 31, 63, 127]: for dup_copies in [0]: #for dup_copies in [7, 15, 31, 63, 127]: # for f in [f*(dup_copies+1) for f in f_ranges[fw_spec['problem_name']]]: for (m, f) in [(61, 0.02), (64, 0.03), (67, 0.02), (62, 0.02), (63, 0.02), (66, 0.03), (65, 0.02), (67, 0.03), (70, 0.03)]: # for m in m_ranges[fw_spec['problem_name']]: if fw_spec['adjust_f'] == True: sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) N = sat.n if fw_spec['ADD_CONSTRAINT_ALL_ONES']: m_effective = m - 1 else: m_effective = m if fw_spec['k']==None: cur_k = N/m elif fw_spec['k'] == 'maxConstant': cur_k = np.floor(N/m) else: cur_k = fw_spec['k'] k_density = cur_k/N print 'N=', N, 'm=', m, "fw_spec['k']=", fw_spec['k'], 'k_density=', k_density #compute the density of ones from k: if k_density > f: #we need to decrease k cur_k = np.floor(f*N) print "changed cur_k=", cur_k if fw_spec['f_block'] == '1minusF': f_prime = (f*N - cur_k)/(N - 2*cur_k) print 'f_prime=', f_prime assert(abs((1 - f_prime)*cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: assert(fw_spec['f_block'] == '1') f_prime = (f*N - cur_k)/(N - cur_k) print 'f_prime=', f_prime assert(abs(cur_k + f_prime*(N - cur_k) - N*f) < .0001), (f_prime, cur_k, N) else: f_prime = f for repeat in range(fw_spec['repeats']): logger = open(filename, 'a') sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=dup_copies) if fw_spec['f_block'] == '1': sat.add_regular_constraints_constantF_permuted(m=m, f=f_prime, f_block=1.0, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], 
change_var_names=fw_spec['change_var_names']) else: assert(fw_spec['f_block'] == '1minusF') sat.add_regular_constraints_constantF_permuted(m=m, f=f_prime, f_block=1.0-f_prime, permute=fw_spec['permute'], k=cur_k,\ ADD_CONSTRAINT_ALL_ONES=fw_spec['ADD_CONSTRAINT_ALL_ONES'], change_var_names=fw_spec['change_var_names']) start_time = time.time() (outcome, empirical_density) = sat.solve(unperturbed_runtime*MAX_TIMEOUT_MULTIPLE) elapsed_time = time.time() - start_time logger.write("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density %f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) logger.close() print("f_prime %f f %f cur_k %f n %d time %f m %d solution %s empirical_density %f\n" % (f_prime, f, cur_k, N, elapsed_time, m, outcome, empirical_density)) @explicit_serialize class CheckTimingExperiment(FireTaskBase): def run_task(self, fw_spec): RESULTS_DIRECTORY = '/atlas/u/jkuck/XORModelCount/SATModelCount/fireworks/slurm_postUAI1/check_timing_consistency/%s' % fw_spec['problem_name'].split('.')[0] if not os.path.exists(RESULTS_DIRECTORY): os.makedirs(RESULTS_DIRECTORY) filename = '%s/expIdx=%d.txt'%\ (RESULTS_DIRECTORY, fw_spec['experiment_idx']) REPEATS = 100 all_times = [] for i in range(REPEATS): sat = SAT("/atlas/u/jkuck/low_density_parity_checks/SAT_problems_cnf/%s"%fw_spec['problem_name'], verbose=False, instance_id=fw_spec['problem_name'], duplicate=0) (outcome, empirical_density) = sat.solve(3600) elapsed_time = outcome[1] all_times.append(elapsed_time) assert(empirical_density == 0), empirical_density logger = open(filename, 'w') logger.write('mean time over 100: %f\n' % np.mean(all_times)) logger.write('median time over 100: %f\n' % np.median(all_times)) logger.write('max time over 100: %f\n' % np.max(all_times)) logger.write('min time over 100: %f\n\n' % np.min(all_times)) logger.write('mean time over 10: %f\n' % np.mean(all_times[0:10])) logger.write('median time over 10: %f\n' % np.median(all_times[0:10])) logger.write('max time over 10: %f\n' % np.max(all_times[0:10])) logger.write('min time over 10: %f\n' % np.min(all_times[0:10])) logger.close() def create_launchpad(): with open('./my_launchpad.yaml', 'w') as f: f.write('host: %s\n' % MONGODB_HOST) f.write('port: %d\n' % MONGODB_PORT) f.write('name: %s\n' % MONGODB_NAME) f.write('username: %s\n' % MONGODB_USERNAME) f.write('password: %s\n' % MONGODB_PASSWORD) f.write('logdir: null\n') f.write('strm_lvl: INFO\n') def run_experiment(): ''' ''' # write new launchpad file create_launchpad() # set up the LaunchPad and reset it launchpad = LaunchPad(host=MONGODB_HOST, port=MONGODB_PORT, name=MONGODB_NAME, username=MONGODB_USERNAME, password=MONGODB_PASSWORD, logdir=None, strm_lvl='INFO', user_indices=None, wf_user_indices=None, ssl_ca_file=None) launchpad.reset('', require_password=False) all_fireworks = [] #PROBLEM_NAMES = ['hypercube.cnf', 'hypercube1.cnf', 'hypercube2.cnf', 'c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] #PROBLEM_NAMES = ['c432.isc'] #PROBLEM_NAMES = ['hypercube3.cnf']#, 'hypercube4.cnf', 'hypercube5.cnf', 'hypercube6.cnf', 'hypercube7.cnf'] #PROBLEM_NAMES = ['hypercube3.cnf']#, 'hypercube4.cnf', 'hypercube5.cnf', 'hypercube6.cnf', 'hypercube7.cnf', 'hypercube.cnf', 'hypercube1.cnf', 'hypercube2.cnf', 'c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 
'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] PROBLEM_NAMES = ['c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'hypercube.cnf', 'hypercube1.cnf', 'hypercube2.cnf', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] #PROBLEM_NAMES = ['c880.isc'] PROBLEM_NAMES = ['log-1.cnf'] REPEATS_OF_EXPERIMENT = 10 for problem_name in PROBLEM_NAMES: for repeats_per_experiment in [10]: for experiment_idx in range(REPEATS_OF_EXPERIMENT): #repeat the same experiment this many times # for ADD_CONSTRAINT_ALL_ONES in [True, False]: # for (f_block, permute, k) in [('1', True, None), ('1minusF', True, None), ('1', False, None), ('1minusF', False, None),\ # ('1', True, 3), ('1minusF', True, 3), ('1', False, 3), ('1minusF', False, 3),\ # ('1', True, 1), ('1minusF', True, 1), ('1', False, 1), ('1minusF', False, 1), # ('1', False, 0)] for (f_block, permute, k, ADD_CONSTRAINT_ALL_ONES, adjust_f, change_var_names) in \ [('1minusF', True, 'maxConstant', False, True, False),\ ('1minusF', True, 'maxConstant', False, True, True),\ ('1minusF', False, 'maxConstant', False, True, False),\ ('1', False, 0, False, True, False)]: # [('1minusF', True, 'maxConstant', False, True),\ # ('1', False, 0, False, True),\ # ('1', True, None, False, True), ('1', False, None, False, True)]: #[('1minusF', True, 'maxConstant', False), ('1minusF', True, 'maxConstant', True),\ #('1', True, None, True), ('1', False, None, True)]: #[('1minusF', True, None, False), ('1minusF', False, None, False), ('1minusF', True, 3, True),\ # ('1minusF', True, 3, False),\ # ('1', True, 1, False),\ # ('1', False, 0, False), ('1', False, 0, True)]: cur_spec = {'problem_name': problem_name, 'repeats': repeats_per_experiment, 'experiment_idx': experiment_idx, 'f_block': f_block, 'permute': permute, 'k': k, 'ADD_CONSTRAINT_ALL_ONES':ADD_CONSTRAINT_ALL_ONES, #True: #compute f such that the original matrix construction using iid entries will #have the same expected number of 1's as when floor(n/m) entries are added with #probability (1-f) #False: don't adjust f's #'expectedNum1s': f denies the expected number of 1's for all methods 'adjust_f': adjust_f, 'change_var_names': change_var_names, } #all_fireworks.append(Firework(RunSpecificExperimentBatch(), spec=cur_spec)) all_fireworks.append(Firework(RunSpecificMFValsExperimentBatch(), spec=cur_spec)) firework_dependencies = {} workflow = Workflow(all_fireworks, firework_dependencies) if TEST_LOCAL: launchpad.add_wf(workflow) rapidfire(launchpad, FWorker()) else: launchpad.add_wf(workflow) qadapter = CommonAdapter.from_file("%s/my_qadapter.yaml" % HOME_DIRECTORY) rapidfire(launchpad, FWorker(), qadapter, launch_dir='.', nlaunches='infinite', njobs_queue=NJOBS_QUEUE, njobs_block=500, sleep_time=None, reserve=False, strm_lvl='INFO', timeout=None, fill_mode=False) def run_check_timing_experiment(): ''' ''' # write new launchpad file create_launchpad() # set up the LaunchPad and reset it launchpad = LaunchPad(host=MONGODB_HOST, port=MONGODB_PORT, name=MONGODB_NAME, username=MONGODB_USERNAME, password=MONGODB_PASSWORD, logdir=None, strm_lvl='INFO', user_indices=None, wf_user_indices=None, ssl_ca_file=None) launchpad.reset('', require_password=False) all_fireworks = [] #PROBLEM_NAMES = ['hypercube.cnf', 'hypercube1.cnf', 
'hypercube2.cnf', 'c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] #PROBLEM_NAMES = ['c432.isc'] #PROBLEM_NAMES = ['hypercube3.cnf']#, 'hypercube4.cnf', 'hypercube5.cnf', 'hypercube6.cnf', 'hypercube7.cnf'] #PROBLEM_NAMES = ['hypercube3.cnf']#, 'hypercube4.cnf', 'hypercube5.cnf', 'hypercube6.cnf', 'hypercube7.cnf', 'hypercube.cnf', 'hypercube1.cnf', 'hypercube2.cnf', 'c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] #PROBLEM_NAMES = ['c499.isc', 'c432.isc', 'tire-1.cnf', 'tire-2.cnf', 'tire-3.cnf', 'tire-4.cnf', 'lang12.cnf', 'c880.isc', 'hypercube.cnf', 'hypercube1.cnf', 'hypercube2.cnf', 'c1355.isc', 'c1908.isc', 'c2670.isc', 'sat-grid-pbl-0010.cnf', 'sat-grid-pbl-0015.cnf', 'sat-grid-pbl-0020.cnf', 'log-1.cnf', 'log-2.cnf', 'ra.cnf'] #PROBLEM_NAMES = ['c880.isc'] PROBLEM_NAMES = ['tire-4.cnf'] REPEATS_OF_EXPERIMENT = 1000 for problem_name in PROBLEM_NAMES: for experiment_idx in range(REPEATS_OF_EXPERIMENT): #repeat the same experiment this many times cur_spec = {'problem_name': problem_name, 'experiment_idx': experiment_idx, } all_fireworks.append(Firework(CheckTimingExperiment(), spec=cur_spec)) firework_dependencies = {} workflow = Workflow(all_fireworks, firework_dependencies) if TEST_LOCAL: launchpad.add_wf(workflow) rapidfire(launchpad, FWorker()) else: launchpad.add_wf(workflow) qadapter = CommonAdapter.from_file("%s/my_qadapter.yaml" % HOME_DIRECTORY) rapidfire(launchpad, FWorker(), qadapter, launch_dir='.', nlaunches='infinite', njobs_queue=NJOBS_QUEUE, njobs_block=500, sleep_time=None, reserve=False, strm_lvl='INFO', timeout=None, fill_mode=False) if __name__=="__main__": run_experiment() #run_check_timing_experiment() ######################### Fireworks info copied from anothor project ######################### # If the database thinks a firework is still running, but no jobs are running on the cluster, try: # $ lpad detect_lostruns --time 1 --refresh # # If a firework fizzles and you are trying to find the error/output, note the fireworks fw_id # in the online database, then search for this fw_id in the launcher block, e.g.: # $ cd block_2017-11-01-07-30-53-457640 # $ pt 'fw_id: 34' # or on atlas-ws-6 use silver searcher: # $ ag 'fw_id: 34' # #Note, on Atlas before this script: # start a krbscreen session: # $ krbscreen #reattach using $ screen -rx # $ reauth #important so that jobs can be submitted after logging out, enter password # # $ export PATH=/opt/rh/python27/root/usr/bin:$PATH # $ export LD_LIBRARY_PATH=/opt/rh/python27/root/usr/lib64/:$LD_LIBRARY_PATH # $ PACKAGE_DIR=/atlas/u/jkuck/software # $ export PATH=$PACKAGE_DIR/anaconda2/bin:$PATH # $ export LD_LIBRARY_PATH=$PACKAGE_DIR/anaconda2/local:$LD_LIBRARY_PATH # $ source activate anaconda_venv # $ cd /atlas/u/jkuck/rbpf_fireworks/ # # To install anaconda packages run, e.g.: # $ conda install -c matsci fireworks=1.3.9 # #May need to run $ kinit -r 30d # # Add the following line to the file ~/.bashrc.user on Atlas: # export PYTHONPATH="/atlas/u/jkuck/rbpf_fireworks:$PYTHONPATH" # Weird, but to run commands like "lpad -l my_launchpad.yaml get_fws", # add the following line to the file ~/.bashrc.user on Atlas: # 
export PYTHONPATH="${PYTHONPATH}:/atlas/u/jkuck/rbpf_fireworks/KITTI_helpers/" # # To install cvxpy on atlas run (hopefully): # #$ export PATH=/opt/rh/python27/root/usr/bin:$PATH #$ export LD_LIBRARY_PATH=/opt/rh/python27/root/usr/lib64/:$LD_LIBRARY_PATH #$ pip install --user numpy #$ pip install --user cvxpy # # Install pymatgen: #$ pip install --user pymatgen ########################################################################################## # #Note, on Sherlock before this script: #$ ml load python/2.7.5 #$ easy_install-2.7 --user pip #$ export PATH=~/.local/bin:$PATH #$ pip2.7 install --user fireworks #and others #$ pip2.7 install --user filterpy #$ pip2.7 install --user scipy --upgrade #$ pip2.7 install --user munkres #$ pip2.7 install --user pymatgen #$ cd /scratch/users/kuck/rbpf_fireworks/ # # Add the following line to the file ~/.bashrc on Sherlock: # export PYTHONPATH="/scratch/users/kuck/rbpf_fireworks:$PYTHONPATH" # Weird, but to run commands like "lpad -l my_launchpad.yaml get_fws", # add the following line to the file ~/.bashrc.user on Atlas: # export PYTHONPATH="${PYTHONPATH}:/scratch/users/kuck/rbpf_fireworks/KITTI_helpers/" # # # When setting up: # - make cluster_config.py file # - make my_qadapter.yaml file (look at fireworks workflow manager website for info) # # To install cvxpy on sherlock run: # $ pip2.7 install --user cvxpy
62.033419
422
0.542684
6,496
48,262
3.844674
0.078664
0.034595
0.025706
0.047127
0.812012
0.793033
0.777498
0.768408
0.761602
0.755355
0
0.059118
0.316543
48,262
777
423
62.113256
0.698045
0.20513
0
0.649425
0
0.015326
0.170993
0.071425
0
0
0
0
0.028736
0
null
null
0.011494
0.028736
null
null
0.042146
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
e202fc9d430a3f9382d86eedc45b978821ce737d
107
py
Python
materiallibrary/__init__.py
xdze2/heatequation
0d83df665925bd12dcbac4f444420b5ab0245779
[ "MIT" ]
null
null
null
materiallibrary/__init__.py
xdze2/heatequation
0d83df665925bd12dcbac4f444420b5ab0245779
[ "MIT" ]
null
null
null
materiallibrary/__init__.py
xdze2/heatequation
0d83df665925bd12dcbac4f444420b5ab0245779
[ "MIT" ]
1
2020-03-19T17:14:22.000Z
2020-03-19T17:14:22.000Z
from materiallibrary.materiallibrary import *
from materiallibrary.airtableimport import airtableimport
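This __init__.py only flattens the package namespace: the star import re-exports everything from materiallibrary.materiallibrary, and airtableimport becomes reachable from the package root. Illustration, assuming those modules define the referenced names:

# Thanks to the re-exports, both spellings resolve to the same object:
# from materiallibrary import airtableimport                  # via the re-export
# from materiallibrary.airtableimport import airtableimport   # direct path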
17.833333
57
0.869159
9
107
10.333333
0.444444
0.408602
0
0
0
0
0
0
0
0
0
0
0.102804
107
5
58
21.4
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
ea255700f41fa4ddf3db1d9f70f85f64dc190a6f
6,079
py
Python
tests/infrastructure/test_bperegistry.py
maximzubkov/codeprep
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
[ "Apache-2.0" ]
33
2020-03-02T23:42:15.000Z
2022-03-18T02:34:32.000Z
tests/infrastructure/test_bperegistry.py
maximzubkov/codeprep
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
[ "Apache-2.0" ]
10
2020-02-27T13:43:00.000Z
2021-04-21T12:11:44.000Z
tests/infrastructure/test_bperegistry.py
maximzubkov/codeprep
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
[ "Apache-2.0" ]
9
2020-03-16T14:28:06.000Z
2021-09-30T09:40:56.000Z
# SPDX-FileCopyrightText: 2020 Hlib Babii <hlibbabii@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0

import os
from unittest.mock import patch

from codeprep.pipeline.bperegistry import get_max_merges, format_available_merge_list_ids, get_min_merges
from codeprep.pipeline.dataset import create_new_id_from

PATH_TO_DATASET_BPE_DIR_STUB = os.path.join('/', 'path', 'to', 'dataset', 'bpe', 'dir')
PATH_TO_DATASET_STUB = os.path.join('/', 'path', 'to', 'dataset')
HLIB_PATH = '/home/hlib/path'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
def test_with_predefined_id(bpe_config_mock):
    bpe_config_mock.to_suffix.return_value = ''
    assert create_new_id_from(PATH_TO_DATASET_STUB, bpe_config_mock, 'id23') == 'id23'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={})
def test_no_existing_bpe_codes(mock, bpe_config_mock):
    bpe_config_mock.to_suffix.return_value = ''
    assert create_new_id_from(PATH_TO_DATASET_STUB, bpe_config_mock) == 'dataset'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={'dataset': 10, 'dataset4': 20, 'dataset_3': 30})
def test_ids_for_same_dataset_exist(mock, bpe_config_mock):
    bpe_config_mock.to_suffix.return_value = ''
    assert create_new_id_from(PATH_TO_DATASET_STUB, bpe_config_mock) == 'dataset_4'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
def test_with_predefined_codes_id(bpe_config_mock):
    bpe_config_mock.to_suffix.return_value = ""
    assert create_new_id_from(HLIB_PATH, bpe_config_mock, 'my-id') == 'my-id'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value="")
def test_simple(mock, bpe_config_mock):
    # given
    bpe_config_mock.to_suffix.return_value = ""

    assert create_new_id_from(HLIB_PATH, bpe_config_mock) == 'path'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={'path': 1000})
def test_same_path_exists(mock, bpe_config_mock):
    # given
    bpe_config_mock.to_suffix.return_value = ""

    assert create_new_id_from(HLIB_PATH, bpe_config_mock) == 'path_1'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={'path': 1000, 'path_1': 2000})
def test_same_path_and_next_one_exist(mock, bpe_config_mock):
    # given
    bpe_config_mock.to_suffix.return_value = ""

    assert create_new_id_from(HLIB_PATH, bpe_config_mock) == 'path_2'


@patch("codeprep.bpepkg.bpe_config.BpeConfig", autospec=True)
@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={'path': 1000, 'path_28': 2000})
def test_same_path_and_one_more_exist(mock, bpe_config_mock):
    # given
    bpe_config_mock.to_suffix.return_value = ""

    assert create_new_id_from(HLIB_PATH, bpe_config_mock) == 'path_29'


@patch('codeprep.pipeline.bperegistry.os.walk', return_value=iter([('', [], [])]))
def test_none(mocked_walk):
    assert get_max_merges('.') is None


@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={})
def test_no_available_merge_lists(bpe_registry_mock):
    assert format_available_merge_list_ids() == ""


@patch('codeprep.pipeline.bperegistry._get_all_custom_bpe_codes_and_max_merges', autospec=True, return_value={"a": 1000, "b": 500})
def test_format_simple(mock):
    assert format_available_merge_list_ids() == "a-[1..1000]\nb-[1..500]\n"


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=[])
def test_max_no_folders(mock):
    assert get_max_merges(PATH_TO_DATASET_BPE_DIR_STUB) is None


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=[])
def test_min_no_folders(mock):
    assert get_min_merges(PATH_TO_DATASET_BPE_DIR_STUB) is None


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['part_vocab'])
def test_max_with_non_number_folder(mock):
    assert get_max_merges(PATH_TO_DATASET_BPE_DIR_STUB) is None


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['part_vocab'])
def test_min_with_non_number_folder(mock):
    assert get_min_merges(PATH_TO_DATASET_BPE_DIR_STUB) is None


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', '20'])
def test_max_all_folders_above_limit(mock):
    assert get_max_merges(PATH_TO_DATASET_BPE_DIR_STUB, 5) is None


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', '20'])
def test_min_all_folders_below_limit(mock):
    assert get_min_merges(PATH_TO_DATASET_BPE_DIR_STUB) == 10


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', 'partvocab'])
def test_max_one_folder_available(mock):
    assert get_max_merges(PATH_TO_DATASET_BPE_DIR_STUB) == 10


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', 'partvocab'])
def test_min_one_folder_available(mock):
    assert get_min_merges(PATH_TO_DATASET_BPE_DIR_STUB) == 10


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', '20', '15', '30', 'partvocab'])
def test_max_simple(mock):
    assert get_max_merges(PATH_TO_DATASET_BPE_DIR_STUB, 20) == 20


@patch('codeprep.pipeline.bperegistry._get_all_bpe_merges_dirs', autospec=True, return_value=['10', '20', '15', '30', 'partvocab'])
def test_min_simple(mock):
    assert get_min_merges(PATH_TO_DATASET_BPE_DIR_STUB, 15) == 15
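The tests above pin down a "next free numeric suffix" rule for derived ids (existing 'path' and 'path_28' yield 'path_29'). A minimal sketch of that rule, using a hypothetical _next_id helper rather than codeprep's actual implementation:

import re

def _next_id(base, existing):
    # If the base id is free, use it; otherwise append the smallest integer
    # suffix larger than any suffix already taken (a bare 'base' counts as 0).
    if base not in existing:
        return base
    taken = [0]
    for key in existing:
        match = re.fullmatch(re.escape(base) + r'_(\d+)', key)
        if match:
            taken.append(int(match.group(1)))
    return f"{base}_{max(taken) + 1}"

assert _next_id('path', {}) == 'path'
assert _next_id('path', {'path': 1000}) == 'path_1'
assert _next_id('path', {'path': 1000, 'path_28': 2000}) == 'path_29'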
41.074324
113
0.785985
920
6079
4.725
0.117391
0.066253
0.071774
0.139867
0.856223
0.84334
0.831838
0.78721
0.785829
0.784909
0
0.018994
0.09064
6079
148
114
41.074324
0.767366
0.019905
0
0.450549
0
0
0.284322
0.243657
0
0
0
0
0.230769
1
0.230769
false
0
0.043956
0
0.274725
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
57a03241c69de6d626409e7b7e4b9aa47e044ab6
1566
py
Python
Attendance/controllers/get/all_groups.py
MishaVernik/StudentAttendance
4dd5b0b95ed53e801a8138fa7d4313d145efc623
[ "MIT" ]
null
null
null
Attendance/controllers/get/all_groups.py
MishaVernik/StudentAttendance
4dd5b0b95ed53e801a8138fa7d4313d145efc623
[ "MIT" ]
10
2020-06-05T22:54:52.000Z
2022-03-12T00:05:11.000Z
Attendance/controllers/get/all_groups.py
MishaVernik/StudentAttendance
4dd5b0b95ed53e801a8138fa7d4313d145efc623
[ "MIT" ]
null
null
null
import psycopg2

from Attendance.context.sql_connection import get_sql_connection


def group_ids(teacher_id):
    all_group_ids = []
    connection = None
    cursor = None
    try:
        connection = get_sql_connection()
        cursor = connection.cursor()
        postgre_sql_select_query = 'SELECT id, "group" FROM public.schedule WHERE teacher_id=%s'
        cursor.execute(postgre_sql_select_query, (teacher_id,))
        mobile_records = cursor.fetchall()
        for row in mobile_records:
            all_group_ids.append(row[0])
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error STUDENTS while querying schedule in PostgreSQL", error)
    finally:
        # closing database connection; guard against a failed connect
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
    return all_group_ids


def groups(teacher_id):
    all_groups = []
    connection = None
    cursor = None
    try:
        connection = get_sql_connection()
        cursor = connection.cursor()
        postgre_sql_select_query = 'SELECT id, "group" FROM public.schedule WHERE teacher_id=%s'
        cursor.execute(postgre_sql_select_query, (teacher_id,))
        mobile_records = cursor.fetchall()
        for row in mobile_records:
            all_groups.append(row[1])
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error STUDENTS while querying schedule in PostgreSQL", error)
    finally:
        # closing database connection; guard against a failed connect
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
    return all_groups
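The two controllers above share one query and differ only in the tuple index they keep. A sketch folding them into a single helper (assuming the same get_sql_connection), with contextlib.closing handling cleanup even when the connect itself fails:

import psycopg2
from contextlib import closing

from Attendance.context.sql_connection import get_sql_connection

def fetch_schedule_column(teacher_id, column):
    # column 0 -> schedule row id, column 1 -> group name
    query = 'SELECT id, "group" FROM public.schedule WHERE teacher_id=%s'
    try:
        with closing(get_sql_connection()) as connection:
            with connection.cursor() as cursor:
                cursor.execute(query, (teacher_id,))
                return [row[column] for row in cursor.fetchall()]
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error while reading schedule from PostgreSQL", error)
        return []

group_ids and groups then reduce to fetch_schedule_column(teacher_id, 0) and fetch_schedule_column(teacher_id, 1).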
32.625
97
0.672414
185
1566
5.427027
0.281081
0.053785
0.095618
0.125498
0.824701
0.824701
0.824701
0.824701
0.824701
0.824701
0
0.005927
0.245849
1566
47
98
33.319149
0.8442
0.076628
0
0.722222
0
0
0.145631
0
0
0
0
0
0
1
0.055556
false
0
0.055556
0
0.166667
0.055556
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a4b7349bdaba35357407ffeda700e4219d009e81
6269
py
Python
loldib/getratings/models/NA/na_lulu/na_lulu_mid.py
koliupy/loldib
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
[ "Apache-2.0" ]
null
null
null
loldib/getratings/models/NA/na_lulu/na_lulu_mid.py
koliupy/loldib
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
[ "Apache-2.0" ]
null
null
null
loldib/getratings/models/NA/na_lulu/na_lulu_mid.py
koliupy/loldib
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
[ "Apache-2.0" ]
null
null
null
from getratings.models.ratings import Ratings class NA_Lulu_Mid_Aatrox(Ratings): pass class NA_Lulu_Mid_Ahri(Ratings): pass class NA_Lulu_Mid_Akali(Ratings): pass class NA_Lulu_Mid_Alistar(Ratings): pass class NA_Lulu_Mid_Amumu(Ratings): pass class NA_Lulu_Mid_Anivia(Ratings): pass class NA_Lulu_Mid_Annie(Ratings): pass class NA_Lulu_Mid_Ashe(Ratings): pass class NA_Lulu_Mid_AurelionSol(Ratings): pass class NA_Lulu_Mid_Azir(Ratings): pass class NA_Lulu_Mid_Bard(Ratings): pass class NA_Lulu_Mid_Blitzcrank(Ratings): pass class NA_Lulu_Mid_Brand(Ratings): pass class NA_Lulu_Mid_Braum(Ratings): pass class NA_Lulu_Mid_Caitlyn(Ratings): pass class NA_Lulu_Mid_Camille(Ratings): pass class NA_Lulu_Mid_Cassiopeia(Ratings): pass class NA_Lulu_Mid_Chogath(Ratings): pass class NA_Lulu_Mid_Corki(Ratings): pass class NA_Lulu_Mid_Darius(Ratings): pass class NA_Lulu_Mid_Diana(Ratings): pass class NA_Lulu_Mid_Draven(Ratings): pass class NA_Lulu_Mid_DrMundo(Ratings): pass class NA_Lulu_Mid_Ekko(Ratings): pass class NA_Lulu_Mid_Elise(Ratings): pass class NA_Lulu_Mid_Evelynn(Ratings): pass class NA_Lulu_Mid_Ezreal(Ratings): pass class NA_Lulu_Mid_Fiddlesticks(Ratings): pass class NA_Lulu_Mid_Fiora(Ratings): pass class NA_Lulu_Mid_Fizz(Ratings): pass class NA_Lulu_Mid_Galio(Ratings): pass class NA_Lulu_Mid_Gangplank(Ratings): pass class NA_Lulu_Mid_Garen(Ratings): pass class NA_Lulu_Mid_Gnar(Ratings): pass class NA_Lulu_Mid_Gragas(Ratings): pass class NA_Lulu_Mid_Graves(Ratings): pass class NA_Lulu_Mid_Hecarim(Ratings): pass class NA_Lulu_Mid_Heimerdinger(Ratings): pass class NA_Lulu_Mid_Illaoi(Ratings): pass class NA_Lulu_Mid_Irelia(Ratings): pass class NA_Lulu_Mid_Ivern(Ratings): pass class NA_Lulu_Mid_Janna(Ratings): pass class NA_Lulu_Mid_JarvanIV(Ratings): pass class NA_Lulu_Mid_Jax(Ratings): pass class NA_Lulu_Mid_Jayce(Ratings): pass class NA_Lulu_Mid_Jhin(Ratings): pass class NA_Lulu_Mid_Jinx(Ratings): pass class NA_Lulu_Mid_Kalista(Ratings): pass class NA_Lulu_Mid_Karma(Ratings): pass class NA_Lulu_Mid_Karthus(Ratings): pass class NA_Lulu_Mid_Kassadin(Ratings): pass class NA_Lulu_Mid_Katarina(Ratings): pass class NA_Lulu_Mid_Kayle(Ratings): pass class NA_Lulu_Mid_Kayn(Ratings): pass class NA_Lulu_Mid_Kennen(Ratings): pass class NA_Lulu_Mid_Khazix(Ratings): pass class NA_Lulu_Mid_Kindred(Ratings): pass class NA_Lulu_Mid_Kled(Ratings): pass class NA_Lulu_Mid_KogMaw(Ratings): pass class NA_Lulu_Mid_Leblanc(Ratings): pass class NA_Lulu_Mid_LeeSin(Ratings): pass class NA_Lulu_Mid_Leona(Ratings): pass class NA_Lulu_Mid_Lissandra(Ratings): pass class NA_Lulu_Mid_Lucian(Ratings): pass class NA_Lulu_Mid_Lulu(Ratings): pass class NA_Lulu_Mid_Lux(Ratings): pass class NA_Lulu_Mid_Malphite(Ratings): pass class NA_Lulu_Mid_Malzahar(Ratings): pass class NA_Lulu_Mid_Maokai(Ratings): pass class NA_Lulu_Mid_MasterYi(Ratings): pass class NA_Lulu_Mid_MissFortune(Ratings): pass class NA_Lulu_Mid_MonkeyKing(Ratings): pass class NA_Lulu_Mid_Mordekaiser(Ratings): pass class NA_Lulu_Mid_Morgana(Ratings): pass class NA_Lulu_Mid_Nami(Ratings): pass class NA_Lulu_Mid_Nasus(Ratings): pass class NA_Lulu_Mid_Nautilus(Ratings): pass class NA_Lulu_Mid_Nidalee(Ratings): pass class NA_Lulu_Mid_Nocturne(Ratings): pass class NA_Lulu_Mid_Nunu(Ratings): pass class NA_Lulu_Mid_Olaf(Ratings): pass class NA_Lulu_Mid_Orianna(Ratings): pass class NA_Lulu_Mid_Ornn(Ratings): pass class NA_Lulu_Mid_Pantheon(Ratings): pass class NA_Lulu_Mid_Poppy(Ratings): pass class NA_Lulu_Mid_Quinn(Ratings): pass class NA_Lulu_Mid_Rakan(Ratings): pass class 
NA_Lulu_Mid_Rammus(Ratings): pass class NA_Lulu_Mid_RekSai(Ratings): pass class NA_Lulu_Mid_Renekton(Ratings): pass class NA_Lulu_Mid_Rengar(Ratings): pass class NA_Lulu_Mid_Riven(Ratings): pass class NA_Lulu_Mid_Rumble(Ratings): pass class NA_Lulu_Mid_Ryze(Ratings): pass class NA_Lulu_Mid_Sejuani(Ratings): pass class NA_Lulu_Mid_Shaco(Ratings): pass class NA_Lulu_Mid_Shen(Ratings): pass class NA_Lulu_Mid_Shyvana(Ratings): pass class NA_Lulu_Mid_Singed(Ratings): pass class NA_Lulu_Mid_Sion(Ratings): pass class NA_Lulu_Mid_Sivir(Ratings): pass class NA_Lulu_Mid_Skarner(Ratings): pass class NA_Lulu_Mid_Sona(Ratings): pass class NA_Lulu_Mid_Soraka(Ratings): pass class NA_Lulu_Mid_Swain(Ratings): pass class NA_Lulu_Mid_Syndra(Ratings): pass class NA_Lulu_Mid_TahmKench(Ratings): pass class NA_Lulu_Mid_Taliyah(Ratings): pass class NA_Lulu_Mid_Talon(Ratings): pass class NA_Lulu_Mid_Taric(Ratings): pass class NA_Lulu_Mid_Teemo(Ratings): pass class NA_Lulu_Mid_Thresh(Ratings): pass class NA_Lulu_Mid_Tristana(Ratings): pass class NA_Lulu_Mid_Trundle(Ratings): pass class NA_Lulu_Mid_Tryndamere(Ratings): pass class NA_Lulu_Mid_TwistedFate(Ratings): pass class NA_Lulu_Mid_Twitch(Ratings): pass class NA_Lulu_Mid_Udyr(Ratings): pass class NA_Lulu_Mid_Urgot(Ratings): pass class NA_Lulu_Mid_Varus(Ratings): pass class NA_Lulu_Mid_Vayne(Ratings): pass class NA_Lulu_Mid_Veigar(Ratings): pass class NA_Lulu_Mid_Velkoz(Ratings): pass class NA_Lulu_Mid_Vi(Ratings): pass class NA_Lulu_Mid_Viktor(Ratings): pass class NA_Lulu_Mid_Vladimir(Ratings): pass class NA_Lulu_Mid_Volibear(Ratings): pass class NA_Lulu_Mid_Warwick(Ratings): pass class NA_Lulu_Mid_Xayah(Ratings): pass class NA_Lulu_Mid_Xerath(Ratings): pass class NA_Lulu_Mid_XinZhao(Ratings): pass class NA_Lulu_Mid_Yasuo(Ratings): pass class NA_Lulu_Mid_Yorick(Ratings): pass class NA_Lulu_Mid_Zac(Ratings): pass class NA_Lulu_Mid_Zed(Ratings): pass class NA_Lulu_Mid_Ziggs(Ratings): pass class NA_Lulu_Mid_Zilean(Ratings): pass class NA_Lulu_Mid_Zyra(Ratings): pass
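Every class in this module is an empty Ratings subclass that differs only by name. If such a file were generated rather than checked in, the same classes could be built at import time with type(); a sketch (champion list abbreviated, and keeping explicit classes may still be preferable for tooling):

from getratings.models.ratings import Ratings

CHAMPIONS = ['Aatrox', 'Ahri', 'Akali']  # ...abbreviated; the real list has ~140 names

for champion in CHAMPIONS:
    name = f'NA_Lulu_Mid_{champion}'
    # type(name, bases, namespace) creates the class object dynamically
    globals()[name] = type(name, (Ratings,), {})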
15.033573
46
0.75642
972
6269
4.452675
0.151235
0.223198
0.350739
0.446396
0.791359
0.791359
0
0
0
0
0
0
0.177221
6269
416
47
15.069712
0.839085
0
0
0.498195
0
0
0
0
0
0
0
0
0
1
0
true
0.498195
0.00361
0
0.501805
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
7
a4cc8e98e175f71654b2e555a014327edd4a1834
585
py
Python
train_mosmed_timm-regnetx_002_shift_scale_rotate.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
train_mosmed_timm-regnetx_002_shift_scale_rotate.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
train_mosmed_timm-regnetx_002_shift_scale_rotate.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
import os

ls = [
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold0_shift_scale_rotate.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold1_shift_scale_rotate.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold2_shift_scale_rotate.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold3_shift_scale_rotate.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold4_shift_scale_rotate.yml",
]

for l in ls:
    os.system(l)
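Since the five config paths differ only in the fold index, the list can be generated, and subprocess.run with an argument list both avoids the shell and stops on a failed fold. A sketch under the assumption that the paths are otherwise unchanged:

import subprocess

for fold in range(5):
    config = ('configs/train_mosmed_unetplusplus_timm-regnetx_002_'
              f'fold{fold}_shift_scale_rotate.yml')
    # check=True raises CalledProcessError if a training run exits non-zero
    subprocess.run(['python', 'main.py', '--configs', config], check=True)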
53.181818
111
0.85812
90
585
5.133333
0.288889
0.108225
0.12987
0.205628
0.84632
0.84632
0.84632
0.84632
0.84632
0.84632
0
0.036101
0.052991
585
11
112
53.181818
0.797834
0
0
0
0
0
0.887372
0.674061
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
a4d12071634493f3e74eaa5170bd78106b29db5e
28415
py
Python
infoblox_netmri/api/broker/v3_8_0/access_objects_access_search_issue_type_broker.py
infobloxopen/infoblox_netmri
aa1c744df7e439dbe163bb9edd165e4e85a9771b
[ "Apache-2.0" ]
12
2016-02-19T12:37:54.000Z
2022-03-04T20:11:08.000Z
infoblox_netmri/api/broker/v3_8_0/access_objects_access_search_issue_type_broker.py
azinfoblox/infoblox-netmri
02372c5231e2677ab6299cb659a73c9a41b4b0f4
[ "Apache-2.0" ]
18
2015-11-12T18:37:00.000Z
2021-05-19T07:59:55.000Z
infoblox_netmri/api/broker/v3_8_0/access_objects_access_search_issue_type_broker.py
azinfoblox/infoblox-netmri
02372c5231e2677ab6299cb659a73c9a41b4b0f4
[ "Apache-2.0" ]
18
2016-01-07T12:04:34.000Z
2022-03-31T11:05:41.000Z
from ..broker import Broker class AccessObjectsAccessSearchIssueTypeBroker(Broker): controller = "access_objects_access_search_issue_types" def show(self, **kwargs): """Shows the details for the specified access objects access search issue type. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param id: The internal NetMRI identifier for this access search alert. :type id: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return access_objects_access_search_issue_type: The access objects access search issue type identified by the specified id. :rtype access_objects_access_search_issue_type: AccessObjectsAccessSearchIssueType """ return self.api_request(self._get_method_fullname("show"), kwargs) def index(self, **kwargs): """Lists the available access objects access search issue types. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this access search alert. :type id: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AccessObjectsAccessSearchIssueType. Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. 
:type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return access_objects_access_search_issue_types: An array of the AccessObjectsAccessSearchIssueType objects that match the specified input criteria. :rtype access_objects_access_search_issue_types: Array of AccessObjectsAccessSearchIssueType """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def search(self, **kwargs): """Lists the available access objects access search issue types matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param access_search_issue_type_field: The name of the issue type field. :type access_search_issue_type_field: Array of String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param access_search_issue_type_id: The internal NetMRI identifier for the access search issue type this record is associated with. :type access_search_issue_type_id: Array of Integer | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param element_id: The internal NetMRI identifier for the related element_type, NULL for a direct value. :type element_id: Array of Integer | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param element_type: The element type, DeviceObject, DeviceService, IPv4Value, or FlowValue. :type element_type: Array of String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this access search alert. :type id: Array of Integer | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param name: The global name of the object, NULL for a direct value. :type name: Array of String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param value: The value of the object or direct value. A list of ipv4 or flow value in csv format. :type value: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. 
Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AccessObjectsAccessSearchIssueType. Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against access objects access search issue types, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: access_search_issue_type_field, access_search_issue_type_id, element_id, element_type, id, name, value. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return access_objects_access_search_issue_types: An array of the AccessObjectsAccessSearchIssueType objects that match the specified input criteria. :rtype access_objects_access_search_issue_types: Array of AccessObjectsAccessSearchIssueType """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available access objects access search issue types matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: access_search_issue_type_field, access_search_issue_type_id, element_id, element_type, id, name, value. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_access_search_issue_type_field: The operator to apply to the field access_search_issue_type_field. 
Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. access_search_issue_type_field: The name of the issue type field. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_access_search_issue_type_field: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_access_search_issue_type_field: If op_access_search_issue_type_field is specified, the field named in this input will be compared to the value in access_search_issue_type_field using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_access_search_issue_type_field must be specified if op_access_search_issue_type_field is specified. :type val_f_access_search_issue_type_field: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_access_search_issue_type_field: If op_access_search_issue_type_field is specified, this value will be compared to the value in access_search_issue_type_field using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_access_search_issue_type_field must be specified if op_access_search_issue_type_field is specified. :type val_c_access_search_issue_type_field: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_access_search_issue_type_id: The operator to apply to the field access_search_issue_type_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. access_search_issue_type_id: The internal NetMRI identifier for the access search issue type this record is associated with. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_access_search_issue_type_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_access_search_issue_type_id: If op_access_search_issue_type_id is specified, the field named in this input will be compared to the value in access_search_issue_type_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_access_search_issue_type_id must be specified if op_access_search_issue_type_id is specified. :type val_f_access_search_issue_type_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_access_search_issue_type_id: If op_access_search_issue_type_id is specified, this value will be compared to the value in access_search_issue_type_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_access_search_issue_type_id must be specified if op_access_search_issue_type_id is specified. :type val_c_access_search_issue_type_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_element_id: The operator to apply to the field element_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. 
element_id: The internal NetMRI identifier for the related element_type, NULL for a direct value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_element_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_element_id: If op_element_id is specified, the field named in this input will be compared to the value in element_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_element_id must be specified if op_element_id is specified. :type val_f_element_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_element_id: If op_element_id is specified, this value will be compared to the value in element_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_element_id must be specified if op_element_id is specified. :type val_c_element_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_element_type: The operator to apply to the field element_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. element_type: The element type, DeviceObject, DeviceService, IPv4Value, or FlowValue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_element_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_element_type: If op_element_type is specified, the field named in this input will be compared to the value in element_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_element_type must be specified if op_element_type is specified. :type val_f_element_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_element_type: If op_element_type is specified, this value will be compared to the value in element_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_element_type must be specified if op_element_type is specified. :type val_c_element_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this access search alert. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified. 
:type val_f_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified. :type val_c_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The global name of the object, NULL for a direct value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified. :type val_f_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified. :type val_c_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_value: The operator to apply to the field value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. value: The value of the object or direct value. A list of ipv4 or flow value in csv format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_value: If op_value is specified, the field named in this input will be compared to the value in value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_value must be specified if op_value is specified. :type val_f_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_value: If op_value is specified, this value will be compared to the value in value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_value must be specified if op_value is specified. :type val_c_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. 
:type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AccessObjectsAccessSearchIssueType. Valid values are id, access_search_issue_type_id, access_search_issue_type_field, element_type, element_id, name, value. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return access_objects_access_search_issue_types: An array of the AccessObjectsAccessSearchIssueType objects that match the specified input criteria. :rtype access_objects_access_search_issue_types: Array of AccessObjectsAccessSearchIssueType """ return self.api_list_request(self._get_method_fullname("find"), kwargs)
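Each public method above is a thin wrapper: it forwards its kwargs to api_request or api_list_request under a controller-qualified method name, so all filtering happens server-side. A hypothetical call site (parameter names follow the docstrings above; how the broker instance is obtained from the NetMRI client is not shown in this file):

# Hypothetical usage sketch, assuming `broker` is an
# AccessObjectsAccessSearchIssueTypeBroker obtained from the client.
issue_types = broker.find(
    op_element_type='=',
    val_c_element_type='DeviceObject',
    sort=['id'],
    limit=100,
)
for issue_type in issue_types:
    print(issue_type.id, issue_type.name)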
55.282101
541
0.623298
3786
28415
4.553619
0.066033
0.067285
0.068039
0.07065
0.923376
0.918271
0.902958
0.89971
0.889965
0.889733
0
0.004957
0.297167
28415
513
542
55.389864
0.858295
0.82474
0
0
0
0
0.093502
0.063391
0
0
0
0
0
1
0.363636
false
0
0.090909
0
1
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
9
101f58e96a740af06a56ede42bbbfb1e7cfe0ac0
2204
py
Python
new_york_times/abc.py
Ovlic/ovlic.py
e776f5f84fbb15c12866a2d49997a21acde29fdb
[ "MIT" ]
null
null
null
new_york_times/abc.py
Ovlic/ovlic.py
e776f5f84fbb15c12866a2d49997a21acde29fdb
[ "MIT" ]
null
null
null
new_york_times/abc.py
Ovlic/ovlic.py
e776f5f84fbb15c12866a2d49997a21acde29fdb
[ "MIT" ]
null
null
null
import abc


class Article(metaclass=abc.ABCMeta):
    @property
    @abc.abstractmethod
    def section(self): pass

    @property
    @abc.abstractmethod
    def subsection(self): pass

    @property
    @abc.abstractmethod
    def title(self): pass

    @property
    @abc.abstractmethod
    def abstract(self): pass

    @property
    @abc.abstractmethod
    def byline(self): pass

    @property
    @abc.abstractmethod
    def url(self): pass

    @property
    @abc.abstractmethod
    def uri(self): pass

    @property
    @abc.abstractmethod
    def updated_date(self): pass

    @property
    @abc.abstractmethod
    def created_date(self): pass

    @property
    @abc.abstractmethod
    def published_date(self): pass

    @property
    @abc.abstractmethod
    def material_type_facet(self): pass

    @property
    @abc.abstractmethod
    def kicker(self): pass

    @property
    @abc.abstractmethod
    def des_facet(self): pass

    @property
    @abc.abstractmethod
    def org_facet(self): pass

    @property
    @abc.abstractmethod
    def per_facet(self): pass

    @property
    @abc.abstractmethod
    def geo_facet(self): pass

    @property
    @abc.abstractmethod
    def short_url(self): pass


class Image(metaclass=abc.ABCMeta):
    @property
    @abc.abstractmethod
    def url(self): pass

    @property
    @abc.abstractmethod
    def format(self): pass

    @property
    @abc.abstractmethod
    def caption(self): pass

    @property
    @abc.abstractmethod
    def copyright(self): pass

    @property
    @abc.abstractmethod
    def media_type(self): pass

    @property
    @abc.abstractmethod
    def width(self): pass

    @property
    @abc.abstractmethod
    def height(self): pass
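Because every member of Article is declared with @abc.abstractmethod, abc refuses to instantiate any subclass until all of them are overridden. A small sketch of that guarantee (assuming the Article class above is in scope):

class PartialArticle(Article):
    # Only one of the seventeen abstract properties is provided here.
    @property
    def section(self):
        return 'World'

try:
    PartialArticle()
except TypeError as error:
    # abc reports the full list of still-abstract members
    print(error)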
17.918699
38
0.51225
191
2204
5.853403
0.198953
0.236136
0.536673
0.601073
0.830948
0.830948
0.469589
0.119857
0.119857
0.119857
0
0
0.417877
2204
123
39
17.918699
0.871395
0
0
0.747475
0
0
0
0
0
0
0
0
0
1
0.242424
false
0.242424
0.010101
0
0.272727
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
8
102a970910995b0df6fd9db8a2ad7b10b2daa146
167
py
Python
tottle/types/updates/inline_query.py
muffleo/tottle
69a5bdda879ab56d43505d517d3369a687c135a2
[ "MIT" ]
12
2020-09-06T15:31:34.000Z
2021-02-27T20:30:34.000Z
tottle/types/updates/inline_query.py
cyanlabs-org/tottle
6cf02022ed7b445c9b5af475c6e854b91780d792
[ "MIT" ]
2
2021-04-13T06:43:42.000Z
2021-07-07T20:52:39.000Z
tottle/types/updates/inline_query.py
cyanlabs-org/tottle
6cf02022ed7b445c9b5af475c6e854b91780d792
[ "MIT" ]
4
2020-09-12T03:09:25.000Z
2021-03-22T08:52:04.000Z
from tottle.types.objects.query import InlineQuery
from tottle.types.updates.base import BaseBotUpdate


class InlineQueryUpdate(BaseBotUpdate, InlineQuery):
    pass
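InlineQueryUpdate adds no members of its own; it only merges the payload fields of InlineQuery with the update plumbing of BaseBotUpdate. The resulting attribute lookup order can be checked directly (a sketch, reusing the imports above):

# Lookup walks the MRO left to right:
# InlineQueryUpdate -> BaseBotUpdate -> InlineQuery -> ... -> object
print([cls.__name__ for cls in InlineQueryUpdate.__mro__])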
23.857143
52
0.832335
19
167
7.315789
0.684211
0.143885
0.215827
0
0
0
0
0
0
0
0
0
0.107784
167
6
53
27.833333
0.932886
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
7
1075eead3dd737933ca14a307ec342f2a84bc5e1
13233
py
Python
covid19_api/api/namespaces/vaccination_data.py
chong601/moh-covid19-api
b762755e9ff5bb11b99102d305753dcd6122afbe
[ "MIT" ]
1
2021-09-15T13:49:05.000Z
2021-09-15T13:49:05.000Z
covid19_api/api/namespaces/vaccination_data.py
chong601/moh-covid19-api
b762755e9ff5bb11b99102d305753dcd6122afbe
[ "MIT" ]
2
2021-08-15T07:56:20.000Z
2021-08-17T22:41:44.000Z
covid19_api/api/namespaces/vaccination_data.py
chong601/moh-covid19-api
b762755e9ff5bb11b99102d305753dcd6122afbe
[ "MIT" ]
null
null
null
from flask_restx import Namespace, Resource, fields, abort from flask_sqlalchemy import Pagination from covid19_api.db_model.sqlalchemy_models import VaxMalaysia, VaxState from covid19_api.api import db api = Namespace('vaccination', 'COVID-19 vaccination data') vax_malaysia = api.model('vax_malaysia', { 'row_id': fields.Integer(title='Row ID'), 'row_version': fields.Integer(title='Row version'), 'date': fields.Date(title='Date'), 'daily_partial': fields.Integer(title='1st doses (for double-dose vaccines) delivered between 0000 and 2359 on date'), 'daily_full': fields.Integer(title='2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. Cansino) delivered between 0000 and 2359 on date.'), 'daily': fields.Integer(title='Total daily delivered between 0000 and 2359 on date'), 'daily_partial_child': fields.Integer(title='1st doses (for double-dose vaccines) delivered for children between 0000 and 2359 on date'), 'daily_full_child': fields.Integer(title='2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. Cansino) delivered for children between 0000 and 2359 on date.'), 'cumul_partial': fields.Integer(title='Sum of cumulative partial doses delivered until row date'), 'cumul_full': fields.Integer(title='Sum of cumulative full doses delivered until row date'), 'cumul': fields.Integer(title='Total cumulative doses delivered until row date'), 'cumul_partial_child': fields.Integer(title='Sum of cumulative 1st doses (for double-dose vaccines) delivered for children between 0000 and 2359 on date'), 'cumul_full_child': fields.Integer(title='Sum of cumulative 2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. Cansino) delivered for children between 0000 and 2359 on date.'), 'pfizer1': fields.Integer(title='1st dose of PFizer vaccine delivered between 0000 and 2359 on date'), 'pfizer2': fields.Integer(title='2nd dose of PFizer vaccine delivered between 0000 and 2359 on date'), 'sinovac1': fields.Integer(title='1st dose of SinoVac vaccine delivered between 0000 and 2359 on date'), 'sinovac2': fields.Integer(title='2nd dose of SinoVac vaccine delivered between 0000 and 2359 on date'), 'astra1': fields.Integer(title='1st dose of AstraZeneca vaccine delivered between 0000 and 2359 on date'), 'astra2': fields.Integer(title='2nd dose of AstraZeneca vaccine delivered between 0000 and 2359 on date'), 'cansino': fields.Integer(title='Single-dose CanSino vaccine delivered between 0000 and 2359 on date'), 'pending': fields.Integer(title='Doses delivered that are quarantined in VMS (Vaccine Management System)'), }) vax_state = api.model('vax_state', { 'row_id': fields.Integer(title='Row ID'), 'row_version': fields.Integer(title='Row version'), 'date': fields.Date(title='Date'), 'state': fields.String(title='State name'), 'daily_partial': fields.Integer(title='1st doses (for double-dose vaccines) delivered between 0000 and 2359 on date'), 'daily_full': fields.Integer(title='2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. Cansino) delivered between 0000 and 2359 on date.'), 'daily': fields.Integer(title='Total daily delivered between 0000 and 2359 on date'), 'daily_partial_child': fields.Integer(title='1st doses (for double-dose vaccines) delivered for children between 0000 and 2359 on date'), 'daily_full_child': fields.Integer(title='2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. 
Cansino) delivered for children between 0000 and 2359 on date.'), 'cumul_partial': fields.Integer(title='Sum of cumulative partial doses delivered until row date'), 'cumul_full': fields.Integer(title='Sum of cumulative full doses delivered until row date'), 'cumul': fields.Integer(title='Total cumulative doses delivered until row date'), 'cumul_partial_child': fields.Integer(title='Sum of cumulative 1st doses (for double-dose vaccines) delivered for children between 0000 and 2359 on date'), 'cumul_full_child': fields.Integer(title='Sum of cumulative 2nd doses (for double-dose vaccines) and 1-dose vaccines (e.g. Cansino) delivered for children between 0000 and 2359 on date.'), 'pfizer1': fields.Integer(title='1st dose of PFizer vaccine delivered between 0000 and 2359 on date'), 'pfizer2': fields.Integer(title='2nd dose of PFizer vaccine delivered between 0000 and 2359 on date'), 'sinovac1': fields.Integer(title='1st dose of SinoVac vaccine delivered between 0000 and 2359 on date'), 'sinovac2': fields.Integer(title='2nd dose of SinoVac vaccine delivered between 0000 and 2359 on date'), 'astra1': fields.Integer(title='1st dose of AstraZeneca vaccine delivered between 0000 and 2359 on date'), 'astra2': fields.Integer(title='2nd dose of AstraZeneca vaccine delivered between 0000 and 2359 on date'), 'cansino': fields.Integer(title='Single-dose CanSino vaccine delivered between 0000 and 2359 on date'), 'pending': fields.Integer(title='Doses delivered that are quarantined in VMS (Vaccine Management System)'), }) pagination_parser = api.parser() pagination_parser.add_argument('page', location='args', help='Page number', type=int) pagination_parser.add_argument('size', location='args', help='Items per page', type=int) @api.route('/vax_malaysia') class VaxMalaysiaWithPagination(Resource): @api.expect(pagination_parser) @api.marshal_with(vax_malaysia, as_list=True, skip_none=True) def get(self): """ Get country-wide vaccination data with pagination support Defaults to vaccination data for the last 7 days sorted by date in ascending order. Size parameter is optional and defaults to 10. """ args: dict = pagination_parser.parse_args() page = args.get('page') or 1 size = args.get('size') or 7 date_subquery = db.session.query(VaxMalaysia.date) query = db.session.query(VaxMalaysia) if not (args['page'] or args['size']): date_subquery = date_subquery.order_by(VaxMalaysia.date.desc()).limit(7) query = query.filter(VaxMalaysia.date.in_(date_subquery)).order_by(VaxMalaysia.date) result:Pagination = query.paginate(page, size, error_out=False) if result.items: return result.items abort(404, f"Invalid page number '{page}'. Valid page numbers are between 1 to {result.pages} for size of {result.per_page} item(s)") @api.route('/vax_malaysia/<string:date>') class VaxMalaysiaByDate(Resource): @api.marshal_with(vax_malaysia, skip_none=True) def get(self, date): """ Get vaccination data by date Defaults to vaccination data for the last 7 days sorted by date in ascending order. Size parameter is optional and defaults to 10. 
""" query = db.session.query(VaxMalaysia).filter(VaxMalaysia.date == date) if db.session.query(query.exists()).scalar(): result = query.first() return result abort(404, error=f"Date '{date}' is not found in database.") @api.route('/vax_state') class VaxStateWithPagination(Resource): @api.expect(pagination_parser) @api.marshal_with(vax_state, as_list=True, skip_none=True) def get(self): """ Get country-wide vaccination data with pagination support Defaults to vaccination data for the last 7 days sorted by date in ascending order. Size parameter is optional and defaults to 10. """ args: dict = pagination_parser.parse_args() page = args.get('page') or 1 # We don't use size against the final result, instead on the number of dates size = args.get('size') or 7 date_subquery = db.session.query(VaxState.date).group_by(VaxState.date) query = db.session.query(VaxState) if not (args['page'] or args['size']): date_subquery = date_subquery.order_by(VaxState.date.desc()).limit(7) query = query.filter(VaxState.date.in_(date_subquery)).order_by(VaxState.date, VaxState.state) return query.all() # Handle date bullshit first, then deal with actual data # Get dates based on the pagination values date_result: Pagination = date_subquery.paginate(page, size, error_out=False) # Get all dates returned by the pagination dates = [date[0] for date in date_result.items] # Future project: implement pagination logic and expose it to end user attr = {a: getattr(date_result, a) for a in dir(date_result) if not a.startswith('__') and not callable(getattr(date_result, a))} if 'query' in attr: compile = attr['query'].statement.compile() attr.update({'query_string': compile.string}) attr.update({'query_param': compile.params}) del attr['query'] # Query the database with the rows selected from pagination # Think of this as a subquery-ish method, except that the query is done separately like: # # pagination_result = select date from vax_state group by date order by date offset (SELECT (page_number - 1) * size) limit size; # query = select * from vax_state where date in (pagination.result); query = query.filter(VaxState.date.in_(dates)).order_by(VaxState.date, VaxState.state) result = query.all() if result: return result abort(404, f"Invalid page number '{page}'. Valid page numbers are between 1 to {date_result.pages} for size of {date_result.per_page} item(s)") @api.route('/vax_state/<string:state>') class VaxStateByStateWithPagination(Resource): @api.expect(pagination_parser) @api.marshal_with(vax_state, as_list=True, skip_none=True) def get(self, state=None): """ Get vaccination data by date Defaults to vaccination data for the last 7 days sorted by date in ascending order. Size parameter is optional and defaults to 10. 
""" args: dict = pagination_parser.parse_args() page = args.get('page') or 1 size = args.get('size') or 7 date_subquery = db.session.query(VaxState.date).group_by(VaxState.date) query = db.session.query(VaxState) if state != 'all': state_exists = db.session.query(db.session.query(VaxState.state).filter(VaxState.state.ilike(f'%{state}')).exists()).scalar() if state_exists: query = query.filter(VaxState.state.ilike(f'%{state}')) else: abort(404, f"State name '{state}' not found in database") if not (args['page'] or args['size']): date_subquery = date_subquery.order_by(VaxState.date.desc()).limit(7) query = query.filter(VaxState.date.in_(date_subquery)).order_by(VaxState.date, VaxState.state) return query.all() # Handle date bullshit first, then deal with actual data # Get dates based on the pagination values date_subquery = date_subquery.order_by(VaxState.date) date_result: Pagination = date_subquery.paginate(page, size, error_out=False) # Get all dates returned by the pagination dates = [date[0] for date in date_result.items] # Future project: implement pagination logic and expose it to end user attr = {a: getattr(date_result, a) for a in dir(date_result) if not a.startswith('__') and not callable(getattr(date_result, a))} if 'query' in attr: compile = attr['query'].statement.compile() attr.update({'query_string': compile.string}) attr.update({'query_param': compile.params}) del attr['query'] # Query the database with the rows selected from pagination # Think of this as a subquery-ish method, except that the query is done separately like: # # pagination_result = select date from hospital_by_state group by date order by date offset (SELECT (page_number - 1) * size) limit size; # query = select * from hospital_by_state where date in (pagination.result); query = query.filter(VaxState.date.in_(dates)).order_by(VaxState.date, VaxState.state) result = query.all() if result: return result abort(404, f"Invalid page number '{page}'. Valid page numbers are between 1 to {date_result.pages} for size of {date_result.per_page} item(s)") @api.route('/vax_state/<string:state>/<string:date>') class VaxStateByStateByDateWithPagination(Resource): @api.marshal_with(vax_state, skip_none=True) def get(self, state=None, date=None): query = db.session.query(VaxState) if state == 'all': query = query.filter(VaxState.date == date) return query.all() else: query = query.filter(VaxState.state.ilike(state), VaxState.date == date) if db.session.query(query.exists()).scalar(): result = query.first() return result abort(404, error=f"State '{state}' with date '{date}' is not found in database.")
54.681818
192
0.691453
1826
13233
4.930449
0.116101
0.059202
0.081973
0.057981
0.888704
0.865378
0.842497
0.832945
0.81595
0.810397
0
0.031037
0.20139
13233
241
193
54.908714
0.820874
0.142598
0
0.700637
0
0.070064
0.37839
0.012085
0
0
0
0
0
1
0.031847
false
0
0.025478
0
0.140127
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
1077202699765ff67f73d84dc61ebb612f3931b2
350
py
Python
temboo/core/Library/Utilities/Authentication/OAuth1/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
7
2016-03-07T02:07:21.000Z
2022-01-21T02:22:41.000Z
temboo/core/Library/Utilities/Authentication/OAuth1/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
null
null
null
temboo/core/Library/Utilities/Authentication/OAuth1/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
8
2016-06-14T06:01:11.000Z
2020-04-22T09:21:44.000Z
from temboo.Library.Utilities.Authentication.OAuth1.FinalizeOAuth import FinalizeOAuth, FinalizeOAuthInputSet, FinalizeOAuthResultSet, FinalizeOAuthChoreographyExecution
from temboo.Library.Utilities.Authentication.OAuth1.InitializeOAuth import InitializeOAuth, InitializeOAuthInputSet, InitializeOAuthResultSet, InitializeOAuthChoreographyExecution
116.666667
179
0.914286
24
350
13.333333
0.625
0.0625
0.10625
0.1625
0.2875
0.2875
0
0
0
0
0
0.005952
0.04
350
2
180
175
0.946429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
1091cd6932c5283c0ab3c622b185960f5f978079
120
py
Python
imagedt/tools/metrics/__init__.py
Ken2yLiu/ImageDT
2fb08ed67c94c690ab5845a949d58d8fb3ff4ee5
[ "Apache-2.0" ]
null
null
null
imagedt/tools/metrics/__init__.py
Ken2yLiu/ImageDT
2fb08ed67c94c690ab5845a949d58d8fb3ff4ee5
[ "Apache-2.0" ]
null
null
null
imagedt/tools/metrics/__init__.py
Ken2yLiu/ImageDT
2fb08ed67c94c690ab5845a949d58d8fb3ff4ee5
[ "Apache-2.0" ]
null
null
null
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function

from .detect_eval import *
24
38
0.833333
17
120
5.235294
0.647059
0.224719
0.359551
0
0
0
0
0
0
0
0
0.009524
0.125
120
5
39
24
0.838095
0.108333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.333333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
52d02f231246db5a41e8acc5d8123e3c9fa72af0
4917
py
Python
testing/indexer/TestIndexerCases.py
Berdugo1994/Tweeter-Search-Engine
ff80707d64b792288b877814d79e39c5b5ceb7ad
[ "MIT" ]
null
null
null
testing/indexer/TestIndexerCases.py
Berdugo1994/Tweeter-Search-Engine
ff80707d64b792288b877814d79e39c5b5ceb7ad
[ "MIT" ]
null
null
null
testing/indexer/TestIndexerCases.py
Berdugo1994/Tweeter-Search-Engine
ff80707d64b792288b877814d79e39c5b5ceb7ad
[ "MIT" ]
null
null
null
import unittest
import os

from module_glove import GloVe
from document import Document
from indexer import Indexer
from parser_module import Parse


class TestIndexerCases(unittest.TestCase):
    glove = GloVe()
    root = os.path.dirname(os.path.abspath(__file__))
    corpus_path = os.path.join(root, 'corpus')
    output_path = os.path.join(root, 'results_old')

    def test_upper_case(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"Hello": 1, "goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        expected = {"HELLO", "goodbye"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case2(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"Hello": 1, "goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        term_dict = {"hello": 1, "Goodbye": 1}
        doc = Document("tweet_id2", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        expected = {"hello", "goodbye"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case3(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"heLLo": 1, "Goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        expected = {"hello", "GOODBYE"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case4(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"heLLo": 1, "Goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        term_dict = {"HeLLo": 1, "GOODBYE": 1}
        doc = Document("tweet_id2", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        expected = {"hello", "GOODBYE"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case_file_saved(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"Hello": 1, "goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        indexer.save_partial_indexer()
        term_dict = {"hello": 1, "Goodbye": 1}
        doc = Document("tweet_id2", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        indexer.save_partial_indexer()
        indexer._do_upper_case_fixes()
        expected = {"hello", "goodbye"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case2_save_file(self):
        indexer = Indexer(self.glove, self.output_path)
        term_dict = {"heLLo": 1, "Goodbye": 1}
        doc = Document("tweet_id", "tweet_date", "full_text2", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        indexer.save_partial_indexer()
        term_dict = {"HeLLo": 1, "GOODBYE": 1}
        doc = Document("tweet_id2", "tweet_date", "full_text", "url",
                       "retweet_text", "retweet_url", "quote_text", "quote_url",
                       term_dict, "doc_length")
        indexer.add_new_doc(doc)
        expected = {"hello", "GOODBYE"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)

    def test_upper_case_failing(self):
        text = 'cat and jack kids face mask 2 packs 4 Cat and Jack Kids Face Mask'
        p = Parse().parse_sentence(text)
        indexer = Indexer(GloVe(), 'C:\\Users\\Sarit Hollander\\Desktop\\Study\\Year C'
                                   '\\Semester A\\IR\\Search Engine\\SearchEngine\\results_old')
        indexer.add_new_doc(p)
        expected = {"HELLO", "goodbye"}
        actual = set(indexer.inverted_idx.keys())
        self.assertEqual(expected, actual)
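Taken together, the expected key sets above encode one case-folding rule: a term stays indexed fully upper-cased while every occurrence seen so far begins with a capital letter, and the first occurrence that begins lower-case folds the entry to lower case for good. A minimal self-contained sketch of that rule follows; merge_term and its postings-dict shape are hypothetical illustrations inferred from the tests, not the project's actual Indexer API.

def merge_term(inverted_idx, term, postings):
    # Fold `term` into the index under the rule the tests above imply.
    upper, lower = term.upper(), term.lower()
    if lower in inverted_idx:
        # A lower-case-starting occurrence was seen before: lower case wins.
        inverted_idx[lower].update(postings)
    elif term[:1].isupper():
        # Only capitalised occurrences so far: keep the UPPER-case entry.
        inverted_idx.setdefault(upper, {}).update(postings)
    else:
        # First lower-case-starting occurrence: fold any UPPER entry down.
        folded = inverted_idx.pop(upper, {})
        folded.update(postings)
        inverted_idx[lower] = folded

idx = {}
merge_term(idx, "Hello", {"tweet_id": 1})
merge_term(idx, "goodbye", {"tweet_id": 1})
assert set(idx) == {"HELLO", "goodbye"}    # as in test_upper_case
merge_term(idx, "hello", {"tweet_id2": 1})
assert set(idx) == {"hello", "goodbye"}    # as in test_upper_case2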
39.97561
147
0.592638
580
4,917
4.756897
0.146552
0.03987
0.05183
0.063791
0.824574
0.824574
0.808626
0.808626
0.808626
0.808626
0
0.008575
0.264796
4,917
122
148
40.303279
0.754633
0
0
0.741935
0
0.010753
0.255319
0.01335
0
0
0
0
0.075269
1
0.075269
false
0
0.064516
0
0.193548
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
52f282c22e4a99b6a1727fbcd54609463bcf17ed
62
py
Python
peasabot/__init__.py
gimait/DaDSbot
6ee6fea2339faa9a9a2fce29c3b00def378d88d3
[ "MIT" ]
null
null
null
peasabot/__init__.py
gimait/DaDSbot
6ee6fea2339faa9a9a2fce29c3b00def378d88d3
[ "MIT" ]
null
null
null
peasabot/__init__.py
gimait/DaDSbot
6ee6fea2339faa9a9a2fce29c3b00def378d88d3
[ "MIT" ]
null
null
null
from .peasabot import Agent


def agent():
    return Agent()
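A minimal usage sketch, assuming the package is installed and importable as peasabot:

import peasabot

bot = peasabot.agent()  # module-level factory returning a fresh Agent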
10.333333
27
0.677419
8
62
5.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.225806
62
5
28
12.4
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
7
5e2562d357a7715aed70c122459d21789a1e7ed5
38,201
py
Python
modules.py
ekwebb/fNRI
5fad3a498d7660bfc512c276d2b09440c1de56f2
[ "MIT" ]
28
2019-06-05T07:33:12.000Z
2022-01-04T10:23:30.000Z
modules.py
etarakci-hvl/fNRI
5fad3a498d7660bfc512c276d2b09440c1de56f2
[ "MIT" ]
null
null
null
modules.py
etarakci-hvl/fNRI
5fad3a498d7660bfc512c276d2b09440c1de56f2
[ "MIT" ]
8
2019-11-20T14:41:37.000Z
2021-10-31T21:22:48.000Z
""" This code is based on https://github.com/ethanfetaya/NRI (MIT licence) """ import torch import torch.nn as nn import torch.nn.functional as F import math from torch.autograd import Variable from utils import my_softmax, get_offdiag_indices, gumbel_softmax _EPS = 1e-10 class MLP(nn.Module): """Two-layer fully-connected ELU net with batch norm.""" def __init__(self, n_in, n_hid, n_out, do_prob=0.): super(MLP, self).__init__() self.fc1 = nn.Linear(n_in, n_hid) self.fc2 = nn.Linear(n_hid, n_out) self.bn = nn.BatchNorm1d(n_out) self.dropout_prob = do_prob self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight.data) m.bias.data.fill_(0.1) elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def batch_norm(self, inputs): x = inputs.view(inputs.size(0) * inputs.size(1), -1) x = self.bn(x) return x.view(inputs.size(0), inputs.size(1), -1) def forward(self, inputs): # Input shape: [num_sims, num_things, num_features] x = F.elu(self.fc1(inputs)) x = F.dropout(x, self.dropout_prob, training=self.training) x = F.elu(self.fc2(x)) return self.batch_norm(x) class CNN(nn.Module): def __init__(self, n_in, n_hid, n_out, do_prob=0.): super(CNN, self).__init__() self.pool = nn.MaxPool1d(kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False) self.conv1 = nn.Conv1d(n_in, n_hid, kernel_size=5, stride=1, padding=0) self.bn1 = nn.BatchNorm1d(n_hid) self.conv2 = nn.Conv1d(n_hid, n_hid, kernel_size=5, stride=1, padding=0) self.bn2 = nn.BatchNorm1d(n_hid) self.conv_predict = nn.Conv1d(n_hid, n_out, kernel_size=1) self.conv_attention = nn.Conv1d(n_hid, 1, kernel_size=1) self.dropout_prob = do_prob self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) m.bias.data.fill_(0.1) elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, inputs): # Input shape: [num_sims * num_edges, num_dims, num_timesteps] x = F.relu(self.conv1(inputs)) x = self.bn1(x) x = F.dropout(x, self.dropout_prob, training=self.training) x = self.pool(x) x = F.relu(self.conv2(x)) x = self.bn2(x) pred = self.conv_predict(x) attention = my_softmax(self.conv_attention(x), axis=2) edge_prob = (pred * attention).mean(dim=2) return edge_prob class MLPEncoder(nn.Module): def __init__(self, n_in, n_hid, n_out, do_prob=0., factor=True): super(MLPEncoder, self).__init__() self.factor = factor # n_hid = num edge types self.mlp1 = MLP(n_in, n_hid, n_hid, do_prob) self.mlp2 = MLP(n_hid * 2, n_hid, n_hid, do_prob) self.mlp3 = MLP(n_hid, n_hid, n_hid, do_prob) if self.factor: self.mlp4 = MLP(n_hid * 3, n_hid, n_hid, do_prob) print("Using factor graph MLP encoder.") else: self.mlp4 = MLP(n_hid * 2, n_hid, n_hid, do_prob) print("Using MLP encoder.") self.fc_out = nn.Linear(n_hid, n_out) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight.data) m.bias.data.fill_(0.1) def edge2node(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. incoming = torch.matmul(rel_rec.t(), x) return incoming / incoming.size(1) def node2edge(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. 
receivers = torch.matmul(rel_rec, x) senders = torch.matmul(rel_send, x) edges = torch.cat([receivers, senders], dim=2) return edges def forward(self, inputs, rel_rec, rel_send): # Input shape: [num_sims, num_atoms, num_timesteps, num_dims] x = inputs.view(inputs.size(0), inputs.size(1), -1) # New shape: [num_sims, num_atoms, num_timesteps*num_dims] x = self.mlp1(x) # 2-layer ELU net per node x = self.node2edge(x, rel_rec, rel_send) x = self.mlp2(x) x_skip = x if self.factor: x = self.edge2node(x, rel_rec, rel_send) x = self.mlp3(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp4(x) else: x = self.mlp3(x) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp4(x) return self.fc_out(x) class MLPEncoder_multi(nn.Module): def __init__(self, n_in, n_hid, edge_types_list, do_prob=0., split_point=1, init_type='xavier_normal', bias_init=0.0): super(MLPEncoder_multi, self).__init__() self.edge_types_list = edge_types_list self.mlp1 = MLP(n_in, n_hid, n_hid, do_prob) #print(self.mlp1.fc1.weight[0][0:5]) self.mlp2 = MLP(n_hid * 2, n_hid, n_hid, do_prob) self.init_type = init_type if self.init_type not in [ 'xavier_normal', 'orthogonal', 'sparse' ]: raise ValueError('This initialization type has not been coded') #print('Using '+self.init_type+' for encoder weight initialization') self.bias_init = bias_init self.split_point = split_point if split_point == 0: self.mlp3 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp4 = MLP(n_hid * 3, n_hid, n_hid, do_prob) self.fc_out = nn.ModuleList([nn.Linear(n_hid, sum(edge_types_list))]) elif split_point == 1: self.mlp3 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp4 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, K) for K in edge_types_list]) elif split_point == 2: self.mlp3 = nn.ModuleList([MLP(n_hid, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.mlp4 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, K) for K in edge_types_list]) else: raise ValueError('Split point is not valid, must be 0, 1, or 2') self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): if self.init_type == 'orthogonal': nn.init.orthogonal_(m.weight.data) elif self.init_type == 'xavier_normal': nn.init.xavier_normal_(m.weight.data) elif self.init_type == 'sparse': nn.init.sparse_(m.weight.data, sparsity=0.1) if not math.isclose(self.bias_init, 0, rel_tol=1e-9): m.bias.data.fill_(self.bias_init) def edge2node(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. incoming = torch.matmul(rel_rec.t(), x) return incoming / incoming.size(1) def node2edge(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. 
receivers = torch.matmul(rel_rec, x) senders = torch.matmul(rel_send, x) edges = torch.cat([receivers, senders], dim=2) return edges def forward(self, inputs, rel_rec, rel_send): # Input shape: [num_sims, num_atoms, num_timesteps, num_dims] x = inputs.view(inputs.size(0), inputs.size(1), -1) # New shape: [num_sims, num_atoms, num_timesteps*num_dims] x = self.mlp1(x) # 2-layer ELU net per node x = self.node2edge(x, rel_rec, rel_send) x = self.mlp2(x) x_skip = x x = self.edge2node(x, rel_rec, rel_send) if self.split_point == 0: x = self.mlp3(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp4(x) return self.fc_out[0](x) elif self.split_point == 1: x = self.mlp3(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection y_list = [] for i in range(len(self.edge_types_list)): y = self.mlp4[i](x) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) elif self.split_point == 2: y_list = [] for i in range(len(self.edge_types_list)): y = self.mlp3[i](x) y = self.node2edge(y, rel_rec, rel_send) y = torch.cat((y, x_skip), dim=2) # Skip connection y = self.mlp4[i](y) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) class MLPEncoder_sigmoid(nn.Module): def __init__(self, n_in, n_hid, num_factors, do_prob=0., split_point=1): super(MLPEncoder_sigmoid, self).__init__() self.num_factors = num_factors self.mlp1 = MLP(n_in, n_hid, n_hid, do_prob) self.mlp2 = MLP(n_hid * 2, n_hid, n_hid, do_prob) self.mlp3 = MLP(n_hid, n_hid, n_hid, do_prob) self.split_point = split_point if split_point == 0: self.mlp4 = MLP(n_hid * 3, n_hid, n_hid, do_prob) self.fc_out = nn.Linear(n_hid, num_factors) elif split_point == 1: self.mlp4 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in range(num_factors)]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, 1) for i in range(num_factors)]) elif split_point == 2: self.mlp3 = nn.ModuleList([MLP(n_hid, n_hid, n_hid, do_prob) for _ in range(num_factors)]) self.mlp4 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in range(num_factors)]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, 1) for i in range(num_factors)]) else: raise ValueError('Split point is not valid, must be 0, 1, or 2') self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight.data) m.bias.data.fill_(0.1) def edge2node(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. incoming = torch.matmul(rel_rec.t(), x) return incoming / incoming.size(1) def node2edge(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. 
receivers = torch.matmul(rel_rec, x) senders = torch.matmul(rel_send, x) edges = torch.cat([receivers, senders], dim=2) return edges def forward(self, inputs, rel_rec, rel_send): # Input shape: [num_sims, num_atoms, num_timesteps, num_dims] x = inputs.view(inputs.size(0), inputs.size(1), -1) # New shape: [num_sims, num_atoms, num_timesteps*num_dims] x = self.mlp1(x) # 2-layer ELU net per node x = self.node2edge(x, rel_rec, rel_send) x = self.mlp2(x) x_skip = x x = self.edge2node(x, rel_rec, rel_send) if self.split_point == 0: x = self.mlp3(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp4(x) return self.fc_out(x) elif self.split_point == 1: x = self.mlp3(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection y_list = [] for i in range(self.num_factors): y = self.mlp4[i](x) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) elif self.split_point == 2: y_list = [] for i in range(self.num_factors): y = self.mlp3[i](x) y = self.node2edge(y, rel_rec, rel_send) y = torch.cat((y, x_skip), dim=2) # Skip connection y = self.mlp4[i](y) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) class RandomEncoder(nn.Module): """MLP decoder module.""" def __init__(self, edge_types_list, cuda_on): super(RandomEncoder, self).__init__() self.edge_types_list = edge_types_list self.cuda_on = cuda_on print('Using a random encoder.') def forward(self, inputs, rel_rec, rel_send): n = inputs.shape[1] output = Variable(torch.randn(inputs.shape[0],n**2-n,sum(self.edge_types_list))) if self.cuda_on: output = output.cuda() return output class OnesEncoder(nn.Module): """MLP decoder module.""" def __init__(self, edge_types_list, cuda_on): super(OnesEncoder, self).__init__() self.edge_types_list = edge_types_list self.cuda_on = cuda_on print('Using a "ones" encoder.') def forward(self, inputs, rel_rec, rel_send): n = inputs.shape[1] output = Variable(torch.ones(inputs.shape[0],n**2-n,sum(self.edge_types_list))) if self.cuda_on: output = output.cuda() return output class CNNEncoder(nn.Module): def __init__(self, n_in, n_hid, n_out, do_prob=0., factor=True): super(CNNEncoder, self).__init__() self.dropout_prob = do_prob self.factor = factor self.cnn = CNN(n_in * 2, n_hid, n_hid, do_prob) self.mlp1 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp2 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp3 = MLP(n_hid * 3, n_hid, n_hid, do_prob) self.fc_out = nn.Linear(n_hid, n_out) if self.factor: print("Using factor graph CNN encoder.") else: print("Using CNN encoder.") self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_normal(m.weight.data) m.bias.data.fill_(0.1) def node2edge_temporal(self, inputs, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. x = inputs.view(inputs.size(0), inputs.size(1), -1) receivers = torch.matmul(rel_rec, x) receivers = receivers.view(inputs.size(0) * receivers.size(1), inputs.size(2), inputs.size(3)) receivers = receivers.transpose(2, 1) senders = torch.matmul(rel_send, x) senders = senders.view(inputs.size(0) * senders.size(1), inputs.size(2), inputs.size(3)) senders = senders.transpose(2, 1) # receivers and senders have shape: # [num_sims * num_edges, num_dims, num_timesteps] edges = torch.cat([receivers, senders], dim=1) return edges def edge2node(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. 
incoming = torch.matmul(rel_rec.t(), x) return incoming / incoming.size(1) def node2edge(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. receivers = torch.matmul(rel_rec, x) senders = torch.matmul(rel_send, x) edges = torch.cat([receivers, senders], dim=2) return edges def forward(self, inputs, rel_rec, rel_send): # Input has shape: [num_sims, num_atoms, num_timesteps, num_dims] edges = self.node2edge_temporal(inputs, rel_rec, rel_send) x = self.cnn(edges) x = x.view(inputs.size(0), (inputs.size(1) - 1) * inputs.size(1), -1) x = self.mlp1(x) x_skip = x if self.factor: x = self.edge2node(x, rel_rec, rel_send) x = self.mlp2(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp3(x) return self.fc_out(x) class CNNEncoder_multi(nn.Module): def __init__(self, n_in, n_hid, edge_types_list, do_prob=0., split_point=0, init_type='xavier_normal'): super(CNNEncoder_multi, self).__init__() self.dropout_prob = do_prob self.edge_types_list = edge_types_list self.init_type = init_type if self.init_type not in [ 'xavier_normal', 'orthogonal' ]: raise ValueError('This initialization type has not been coded') print('Using '+self.init_type+' for encoder weight initialization') self.cnn = CNN(n_in * 2, n_hid, n_hid, do_prob) self.mlp1 = MLP(n_hid, n_hid, n_hid, do_prob) self.split_point = split_point if split_point == 0: self.mlp2 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp3 = MLP(n_hid * 3, n_hid, n_hid, do_prob) self.fc_out = nn.Linear(n_hid, sum(edge_types_list)) elif split_point == 1: self.mlp2 = MLP(n_hid, n_hid, n_hid, do_prob) self.mlp3 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, K) for K in edge_types_list]) elif split_point == 2: self.mlp2 = nn.ModuleList([MLP(n_hid, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.mlp3 = nn.ModuleList([MLP(n_hid * 3, n_hid, n_hid, do_prob) for _ in edge_types_list]) self.fc_out = nn.ModuleList([nn.Linear(n_hid, K) for K in edge_types_list]) else: raise ValueError('Split point is not valid, must be 0, 1, or 2') self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): if self.init_type == 'orthogonal': nn.init.orthogonal_(m.weight.data) elif self.init_type == 'xavier_normal': nn.init.xavier_normal_(m.weight.data) m.bias.data.fill_(0.1) def node2edge_temporal(self, inputs, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. x = inputs.view(inputs.size(0), inputs.size(1), -1) receivers = torch.matmul(rel_rec, x) receivers = receivers.view(inputs.size(0) * receivers.size(1), inputs.size(2), inputs.size(3)) receivers = receivers.transpose(2, 1) senders = torch.matmul(rel_send, x) senders = senders.view(inputs.size(0) * senders.size(1), inputs.size(2), inputs.size(3)) senders = senders.transpose(2, 1) # receivers and senders have shape: # [num_sims * num_edges, num_dims, num_timesteps] edges = torch.cat([receivers, senders], dim=1) return edges def edge2node(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. incoming = torch.matmul(rel_rec.t(), x) return incoming / incoming.size(1) def node2edge(self, x, rel_rec, rel_send): # NOTE: Assumes that we have the same graph across all samples. 
receivers = torch.matmul(rel_rec, x) senders = torch.matmul(rel_send, x) edges = torch.cat([receivers, senders], dim=2) return edges def forward(self, inputs, rel_rec, rel_send): # Input has shape: [num_sims, num_atoms, num_timesteps, num_dims] edges = self.node2edge_temporal(inputs, rel_rec, rel_send) x = self.cnn(edges) x = x.view(inputs.size(0), (inputs.size(1) - 1) * inputs.size(1), -1) x = self.mlp1(x) x_skip = x x = self.edge2node(x, rel_rec, rel_send) if self.split_point == 0: x = self.mlp2(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection x = self.mlp3(x) return self.fc_out(x) elif self.split_point == 1: x = self.mlp2(x) x = self.node2edge(x, rel_rec, rel_send) x = torch.cat((x, x_skip), dim=2) # Skip connection y_list = [] for i in range(len(self.edge_types_list)): y = self.mlp3[i](x) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) elif self.split_point == 2: y_list = [] for i in range(len(self.edge_types_list)): y = self.mlp2[i](x) y = self.node2edge(y, rel_rec, rel_send) y = torch.cat((y, x_skip), dim=2) # Skip connection y = self.mlp3[i](y) y_list.append( self.fc_out[i](y) ) return torch.cat(y_list,dim=-1) class MLPDecoder(nn.Module): """MLP decoder module.""" def __init__(self, n_in_node, edge_types, msg_hid, msg_out, n_hid, do_prob=0., skip_first=False): super(MLPDecoder, self).__init__() self.msg_fc1 = nn.ModuleList( [nn.Linear(2 * n_in_node, msg_hid) for _ in range(edge_types)]) self.msg_fc2 = nn.ModuleList( [nn.Linear(msg_hid, msg_out) for _ in range(edge_types)]) self.msg_out_shape = msg_out self.skip_first_edge_type = skip_first self.out_fc1 = nn.Linear(n_in_node + msg_out, n_hid) self.out_fc2 = nn.Linear(n_hid, n_hid) self.out_fc3 = nn.Linear(n_hid, n_in_node) print('Using learned interaction net decoder.') self.dropout_prob = do_prob def single_step_forward(self, single_timestep_inputs, rel_rec, rel_send, single_timestep_rel_type): # single_timestep_inputs has shape # [batch_size, num_timesteps, num_atoms, num_dims] # single_timestep_rel_type has shape: # [batch_size, num_timesteps, num_atoms*(num_atoms-1), num_edge_types] # Node2edge receivers = torch.matmul(rel_rec, single_timestep_inputs) senders = torch.matmul(rel_send, single_timestep_inputs) pre_msg = torch.cat([receivers, senders], dim=-1) all_msgs = Variable(torch.zeros(pre_msg.size(0), pre_msg.size(1), pre_msg.size(2), self.msg_out_shape)) if single_timestep_inputs.is_cuda: all_msgs = all_msgs.cuda() if self.skip_first_edge_type: start_idx = 1 else: start_idx = 0 # Run separate MLP for every edge type # NOTE: To exlude one edge type, simply offset range by 1 for i in range(start_idx, len(self.msg_fc2)): msg = F.relu(self.msg_fc1[i](pre_msg)) msg = F.dropout(msg, p=self.dropout_prob) msg = F.relu(self.msg_fc2[i](msg)) msg = msg * single_timestep_rel_type[:, :, :, i:i + 1] all_msgs += msg # Aggregate all msgs to receiver agg_msgs = all_msgs.transpose(-2, -1).matmul(rel_rec).transpose(-2, -1) agg_msgs = agg_msgs.contiguous() # Skip connection aug_inputs = torch.cat([single_timestep_inputs, agg_msgs], dim=-1) # Output MLP pred = F.dropout(F.relu(self.out_fc1(aug_inputs)), p=self.dropout_prob) pred = F.dropout(F.relu(self.out_fc2(pred)), p=self.dropout_prob) pred = self.out_fc3(pred) # Predict position/velocity difference return single_timestep_inputs + pred def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1): # NOTE: Assumes that we have the same graph across all samples. 
inputs = inputs.transpose(1, 2).contiguous() sizes = [rel_type.size(0), inputs.size(1), rel_type.size(1), rel_type.size(2)] rel_type = rel_type.unsqueeze(1).expand(sizes) time_steps = inputs.size(1) assert (pred_steps <= time_steps) preds = [] # Only take n-th timesteps as starting points (n: pred_steps) last_pred = inputs[:, 0::pred_steps, :, :] curr_rel_type = rel_type[:, 0::pred_steps, :, :] # NOTE: Assumes rel_type is constant (i.e. same across all time steps). # Run n prediction steps for step in range(0, pred_steps): last_pred = self.single_step_forward(last_pred, rel_rec, rel_send, curr_rel_type) preds.append(last_pred) sizes = [preds[0].size(0), preds[0].size(1) * pred_steps, preds[0].size(2), preds[0].size(3)] output = Variable(torch.zeros(sizes)) if inputs.is_cuda: output = output.cuda() # Re-assemble correct timeline for i in range(len(preds)): output[:, i::pred_steps, :, :] = preds[i] pred_all = output[:, :(inputs.size(1) - 1), :, :] return pred_all.transpose(1, 2).contiguous() class MLPDecoder_multi(nn.Module): """MLP decoder module.""" def __init__(self, n_in_node, edge_types, edge_types_list, msg_hid, msg_out, n_hid, do_prob=0., skip_first=False, init_type='default'): super(MLPDecoder_multi, self).__init__() self.msg_fc1 = nn.ModuleList( [nn.Linear(2 * n_in_node, msg_hid) for _ in range(edge_types)]) self.msg_fc2 = nn.ModuleList( [nn.Linear(msg_hid, msg_out) for _ in range(edge_types)]) self.msg_out_shape = msg_out self.skip_first = skip_first self.edge_types = edge_types self.edge_types_list = edge_types_list self.out_fc1 = nn.Linear(n_in_node + msg_out, n_hid) self.out_fc2 = nn.Linear(n_hid, n_hid) self.out_fc3 = nn.Linear(n_hid, n_in_node) print('Using learned interaction net decoder.') self.dropout_prob = do_prob self.init_type = init_type if self.init_type not in [ 'xavier_normal', 'orthogonal', 'default' ]: raise ValueError('This initialization type has not been coded') #print('Using '+self.init_type+' for decoder weight initialization') if self.init_type != 'default': self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): if self.init_type == 'orthogonal': nn.init.orthogonal_(m.weight.data,gain=0.000001) elif self.init_type == 'xavier_normal': nn.init.xavier_normal_(m.weight.data,gain=0.000001) #m.bias.data.fill_(0.1) def single_step_forward(self, single_timestep_inputs, rel_rec, rel_send, single_timestep_rel_type): # single_timestep_inputs has shape # [batch_size, num_timesteps, num_atoms, num_dims] # single_timestep_rel_type has shape: # [batch_size, num_timesteps, num_atoms*(num_atoms-1), num_edge_types] # Node2edge receivers = torch.matmul(rel_rec, single_timestep_inputs) senders = torch.matmul(rel_send, single_timestep_inputs) pre_msg = torch.cat([receivers, senders], dim=-1) all_msgs = Variable(torch.zeros(pre_msg.size(0), pre_msg.size(1), pre_msg.size(2), self.msg_out_shape)) if single_timestep_inputs.is_cuda: all_msgs = all_msgs.cuda() # non_null_idxs = list of indexs of edge types which as non null (i.e. 
edges over which messages can be passed) non_null_idxs = list(range(self.edge_types)) if self.skip_first: # if skip_first is True, the first edge type in each factor block is null edge = 0 for k in self.edge_types_list: non_null_idxs.remove(edge) edge += k # Run separate MLP for every edge type # NOTE: To exlude one edge type, simply offset range by 1 for i in non_null_idxs: msg = F.relu(self.msg_fc1[i](pre_msg)) msg = F.dropout(msg, p=self.dropout_prob) msg = F.relu(self.msg_fc2[i](msg)) msg = msg * single_timestep_rel_type[:, :, :, i:i + 1] all_msgs += msg # Aggregate all msgs to receiver agg_msgs = all_msgs.transpose(-2, -1).matmul(rel_rec).transpose(-2, -1) agg_msgs = agg_msgs.contiguous() # Skip connection aug_inputs = torch.cat([single_timestep_inputs, agg_msgs], dim=-1) # Output MLP pred = F.dropout(F.relu(self.out_fc1(aug_inputs)), p=self.dropout_prob) pred = F.dropout(F.relu(self.out_fc2(pred)), p=self.dropout_prob) pred = self.out_fc3(pred) # Predict position/velocity difference return single_timestep_inputs + pred def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1): # NOTE: Assumes that we have the same graph across all samples. inputs = inputs.transpose(1, 2).contiguous() sizes = [rel_type.size(0), inputs.size(1), rel_type.size(1), rel_type.size(2)] rel_type = rel_type.unsqueeze(1).expand(sizes) time_steps = inputs.size(1) assert (pred_steps <= time_steps) preds = [] # Only take n-th timesteps as starting points (n: pred_steps) last_pred = inputs[:, 0::pred_steps, :, :] curr_rel_type = rel_type[:, 0::pred_steps, :, :] # NOTE: Assumes rel_type is constant (i.e. same across all time steps). # Run n prediction steps for step in range(0, pred_steps): last_pred = self.single_step_forward(last_pred, rel_rec, rel_send, curr_rel_type) preds.append(last_pred) sizes = [preds[0].size(0), preds[0].size(1) * pred_steps, preds[0].size(2), preds[0].size(3)] output = Variable(torch.zeros(sizes)) if inputs.is_cuda: output = output.cuda() # Re-assemble correct timeline for i in range(len(preds)): output[:, i::pred_steps, :, :] = preds[i] pred_all = output[:, :(inputs.size(1) - 1), :, :] return pred_all.transpose(1, 2).contiguous() class MLPDecoder_sigmoid(nn.Module): """MLP decoder module.""" def __init__(self, n_in_node, num_factors, msg_hid, msg_out, n_hid, do_prob=0., skip_first=False, init_type='default'): super(MLPDecoder_sigmoid, self).__init__() self.msg_fc1 = nn.ModuleList( [nn.Linear(2 * n_in_node, msg_hid) for _ in range(num_factors)]) self.msg_fc2 = nn.ModuleList( [nn.Linear(msg_hid, msg_out) for _ in range(num_factors)]) self.msg_out_shape = msg_out self.num_factors = num_factors self.out_fc1 = nn.Linear(n_in_node + msg_out, n_hid) self.out_fc2 = nn.Linear(n_hid, n_hid) self.out_fc3 = nn.Linear(n_hid, n_in_node) print('Using learned interaction net decoder.') self.dropout_prob = do_prob self.init_type = init_type if self.init_type not in [ 'xavier_normal', 'orthogonal', 'default' ]: raise ValueError('This initialization type has not been coded') #print('Using '+self.init_type+' for decoder weight initialization') if self.init_type != 'default': self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): if self.init_type == 'orthogonal': nn.init.orthogonal_(m.weight.data,gain=0.000001) elif self.init_type == 'xavier_normal': nn.init.xavier_normal_(m.weight.data,gain=0.000001) #m.bias.data.fill_(0.1) def single_step_forward(self, single_timestep_inputs, rel_rec, rel_send, single_timestep_rel_type): # single_timestep_inputs has 
shape # [batch_size, num_timesteps, num_atoms, num_dims] # single_timestep_rel_type has shape: # [batch_size, num_timesteps, num_atoms*(num_atoms-1), num_edge_types] # Node2edge receivers = torch.matmul(rel_rec, single_timestep_inputs) senders = torch.matmul(rel_send, single_timestep_inputs) pre_msg = torch.cat([receivers, senders], dim=-1) all_msgs = Variable(torch.zeros(pre_msg.size(0), pre_msg.size(1), pre_msg.size(2), self.msg_out_shape)) if single_timestep_inputs.is_cuda: all_msgs = all_msgs.cuda() # Run separate MLP for every edge type # NOTE: To exlude one edge type, simply offset range by 1 for i in range(self.num_factors): msg = F.relu(self.msg_fc1[i](pre_msg)) msg = F.dropout(msg, p=self.dropout_prob) msg = F.relu(self.msg_fc2[i](msg)) msg = msg * single_timestep_rel_type[:, :, :, i:i + 1] all_msgs += msg # Aggregate all msgs to receiver agg_msgs = all_msgs.transpose(-2, -1).matmul(rel_rec).transpose(-2, -1) agg_msgs = agg_msgs.contiguous() # Skip connection aug_inputs = torch.cat([single_timestep_inputs, agg_msgs], dim=-1) # Output MLP pred = F.dropout(F.relu(self.out_fc1(aug_inputs)), p=self.dropout_prob) pred = F.dropout(F.relu(self.out_fc2(pred)), p=self.dropout_prob) pred = self.out_fc3(pred) # Predict position/velocity difference return single_timestep_inputs + pred def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1): # NOTE: Assumes that we have the same graph across all samples. inputs = inputs.transpose(1, 2).contiguous() sizes = [rel_type.size(0), inputs.size(1), rel_type.size(1), rel_type.size(2)] rel_type = rel_type.unsqueeze(1).expand(sizes) time_steps = inputs.size(1) assert (pred_steps <= time_steps) preds = [] # Only take n-th timesteps as starting points (n: pred_steps) last_pred = inputs[:, 0::pred_steps, :, :] curr_rel_type = rel_type[:, 0::pred_steps, :, :] # NOTE: Assumes rel_type is constant (i.e. same across all time steps). 
# Run n prediction steps for step in range(0, pred_steps): last_pred = self.single_step_forward(last_pred, rel_rec, rel_send, curr_rel_type) preds.append(last_pred) sizes = [preds[0].size(0), preds[0].size(1) * pred_steps, preds[0].size(2), preds[0].size(3)] output = Variable(torch.zeros(sizes)) if inputs.is_cuda: output = output.cuda() # Re-assemble correct timeline for i in range(len(preds)): output[:, i::pred_steps, :, :] = preds[i] pred_all = output[:, :(inputs.size(1) - 1), :, :] return pred_all.transpose(1, 2).contiguous() class StationaryDecoder(nn.Module): """MLP decoder module.""" def __init__(self): super(StationaryDecoder, self).__init__() print('Using stationary decoder.') def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1): inputs = inputs.transpose(1, 2).contiguous() time_steps = inputs.size(1) assert (pred_steps <= time_steps) preds = [] # Only take n-th timesteps as starting points (n: pred_steps) last_pred = inputs[:, 0::pred_steps, :, :] # Run n prediction steps for step in range(0, pred_steps): preds.append(last_pred) sizes = [preds[0].size(0), preds[0].size(1) * pred_steps, preds[0].size(2), preds[0].size(3)] output = Variable(torch.zeros(sizes)) if inputs.is_cuda: output = output.cuda() # Re-assemble correct timeline for i in range(len(preds)): output[:, i::pred_steps, :, :] = preds[i] pred_all = output[:, :(inputs.size(1) - 1), :, :] return pred_all.transpose(1, 2).contiguous() class VelocityStepDecoder(nn.Module): """MLP decoder module.""" def __init__(self, delta_T=0.1): super(VelocityStepDecoder, self).__init__() self.delta_T = delta_T print('Using velocity step decoder.') def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1): # input dimensions ofinputs are [batch, particle, time, state] inputs = inputs.transpose(1, 2).contiguous() time_steps = inputs.size(1) assert (pred_steps <= time_steps) preds = [] # Only take n-th timesteps as starting points (n: pred_steps) last_pred = inputs[:, 0::pred_steps, :, :] # Run n prediction steps for step in range(0, pred_steps): last_pred[:, :, :, 0:2] = last_pred[:, :, :, 0:2] + self.delta_T*last_pred[:, :, :, 2:] preds.append(last_pred) sizes = [preds[0].size(0), preds[0].size(1) * pred_steps, preds[0].size(2), preds[0].size(3)] output = Variable(torch.zeros(sizes)) if inputs.is_cuda: output = output.cuda() # Re-assemble correct timeline for i in range(len(preds)): output[:, i::pred_steps, :, :] = preds[i] pred_all = output[:, :(inputs.size(1) - 1), :, :] return pred_all.transpose(1, 2).contiguous()
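All encoders above share the node2edge/edge2node message-passing pattern over a fully connected directed graph without self-loops, with rel_rec and rel_send acting as one-hot edge-to-node incidence matrices. A hedged usage sketch for MLPEncoder follows; it assumes this file is importable as modules, builds the incidence matrices directly (which of np.where's outputs plays receiver versus sender is only a convention), and uses illustrative sizes.

import numpy as np
import torch

from modules import MLPEncoder

num_sims, num_atoms, num_timesteps, num_dims = 8, 5, 49, 4

# One-hot receiver/sender matrices, one row per directed edge (no self-loops).
off_diag = np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)
recv_idx, send_idx = np.where(off_diag)
rel_rec = torch.FloatTensor(np.eye(num_atoms)[recv_idx])    # [num_edges, num_atoms]
rel_send = torch.FloatTensor(np.eye(num_atoms)[send_idx])   # [num_edges, num_atoms]

# n_in is the flattened per-node trajectory length: num_timesteps * num_dims.
encoder = MLPEncoder(n_in=num_timesteps * num_dims, n_hid=256, n_out=2)
inputs = torch.randn(num_sims, num_atoms, num_timesteps, num_dims)
logits = encoder(inputs, rel_rec, rel_send)
print(logits.shape)    # torch.Size([8, 20, 2]): one logit pair per directed edge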
38.315948
119
0.587969
5,469
38,201
3.878588
0.052295
0.025269
0.013907
0.031256
0.901235
0.894022
0.890675
0.885489
0.875495
0.866349
0
0.020029
0.292924
38,201
997
120
38.315948
0.765281
0.122615
0
0.818584
0
0
0.027543
0
0
0
0
0
0.007375
1
0.078171
false
0
0.00885
0
0.160767
0.017699
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
eab2045e66a8039a8b219c7bc6ec252005b748dd
4,461
py
Python
tests/_async/test_take_while.py
christopher-henderson/PyStream
8c76a634448d98591aa68087bf78c6cd4da6a6b7
[ "MIT" ]
null
null
null
tests/_async/test_take_while.py
christopher-henderson/PyStream
8c76a634448d98591aa68087bf78c6cd4da6a6b7
[ "MIT" ]
12
2020-10-10T14:28:10.000Z
2020-10-28T05:42:34.000Z
tests/_async/test_take_while.py
christopher-henderson/PyStream
8c76a634448d98591aa68087bf78c6cd4da6a6b7
[ "MIT" ]
null
null
null
import unittest

from pstream import AsyncStream
from tests._async.utils import Driver, Method


class TakeWhile(Method):

    def __init__(self, args):
        super(TakeWhile, self).__init__(AsyncStream.take_while, args)


class TestTakeWhile(unittest.TestCase):

    @Driver(initial=range(10), method=TakeWhile(args=[lambda x: x != 5]),
            want=[0, 1, 2, 3, 4])
    def test__a_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda x: x != 5]),
            want=[0, 1, 2, 3, 4])
    def test__a_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda x: x != 5]),
            want=[0, 1, 2, 3, 4])
    def test__s_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda x: x != 5]),
            want=[0, 1, 2, 3, 4])
    def test__s_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    ###############################

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: True]),
            want=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    def test1__a_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: True]),
            want=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    def test1__a_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: True]),
            want=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    def test1__s_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: True]),
            want=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    def test1__s_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    ###############################

    @Driver(initial=[], method=TakeWhile(args=[lambda _: False]), want=[])
    def test2__a_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=[], method=TakeWhile(args=[lambda _: False]), want=[])
    def test2__a_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=[], method=TakeWhile(args=[lambda _: False]), want=[])
    def test2__s_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=[], method=TakeWhile(args=[lambda _: False]), want=[])
    def test2__s_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    ###############################

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: False]), want=[])
    def test3__a_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: False]), want=[])
    def test3__a_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: False]), want=[])
    def test3__s_a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)

    @Driver(initial=range(10), method=TakeWhile(args=[lambda _: False]), want=[])
    def test3__s_s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)


if __name__ == '__main__':
    unittest.main()
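The sixteen driver variants above all pin the same contract: take_while yields elements until the predicate first fails, then stops. For reference, the behaviour in plain client code — a hedged sketch that assumes AsyncStream accepts a synchronous iterable and exposes an awaitable collect(), neither of which is shown in this file:

import asyncio

from pstream import AsyncStream

async def main():
    got = await AsyncStream(range(10)).take_while(lambda x: x != 5).collect()
    assert got == [0, 1, 2, 3, 4]  # stops at the first element failing the predicate

asyncio.run(main())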
37.175
108
0.61466
609
4,461
4.374384
0.090312
0.078078
0.114114
0.15015
0.918919
0.918919
0.918919
0.918919
0.918919
0.918919
0
0.029095
0.229545
4,461
119
109
37.487395
0.745999
0
0
0.719101
0
0
0.001832
0
0
0
0
0
0.179775
1
0.191011
false
0
0.033708
0
0.247191
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
eae46877b4fdb341ec7c2f6a8ce08499e11603fb
69,580
py
Python
src/genesis_api_wrapper/data.py
j-suchard/destatis-genesis-api
b0bbec0283ba41707d543b11e556ab0997e5f792
[ "BSD-3-Clause" ]
null
null
null
src/genesis_api_wrapper/data.py
j-suchard/destatis-genesis-api
b0bbec0283ba41707d543b11e556ab0997e5f792
[ "BSD-3-Clause" ]
null
null
null
src/genesis_api_wrapper/data.py
j-suchard/destatis-genesis-api
b0bbec0283ba41707d543b11e556ab0997e5f792
[ "BSD-3-Clause" ]
null
null
null
import datetime import typing from . import enums, tools class DataAPIWrapper: def __init__( self, username: str, password: str, language: enums.Language = enums.Language.GERMAN ): """Create a new part wrapper for the methods listed in the DataAPIWrapper (2.5) section :param username: The username which will be used for authenticating at the database. Due to constraints of the database the username needs to be exactly 10 characters long and may not contain any whitespaces :type username: str :param password: The password which will be used for authenticating at the database. Due to constraints of the database the password needs to be at least 10 characters long, may not exceed 20 characters and may not contain any whitespaces :type password: str :param language: The language in which the responses are returned by the database. :py:enum:mem:`~enums.Language.GERMAN` has the most compatibility with the database since most of the tables are on German. Therefore, this parameter defaults to :py:enum:mem:`~enums.Language.GERMAN` :type language: enums.Language :raise ValueError: The username or the password did not match the constraints stated in their description. """ if " " in username: raise ValueError("The username may not contain any whitespaces") if len(username) != 10: raise ValueError("The username may only be 10 characters long") if " " in password: raise ValueError("The password may not contain any whitespaces") if len(password) < 10: raise ValueError( f"The password may not be shorter than 10 characters. Current " f"length: {len(password)}" ) if len(password) > 20: raise ValueError( f"The password may not be longer that 20 characters. Current " f"length: {len(password)}" ) self._service_path = "/data" # Create the base parameters self._base_parameter = { "username": username, "password": password, "language": language.value, } async def chart2result( self, # Selection Specifiers object_name: str, # Chart Settings chart_type: enums.ChartType.LINE_CHART, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, draw_points_in_line_chart: bool = False, compress_y_axis: bool = False, show_top_values_first: bool = False, # Object Storage Settings object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, ) -> dict: """Download a graph for a result table The file will be downloaded into a local temporary path. 
The path to the image will be returned instead of the whole image :param object_location: The location in which the object is stored, defaults to :py:enum:mem:`~enums.GENESISArea` :type object_location: enums.ObjectStorage :param object_name: The identifier of the result table [required] :type object_name: str :param chart_type: The type of chart which shall be downloaded [required] :type chart_type: enums.ChartType :param image_size: The size of the image which shall be downloaded [optional, default 1024x768 pixels] :type image_size: enums.ImageSize :param draw_points_in_line_chart: Highlight data points in a line chart [optional, only allowed if chart_type is line chart] :type draw_points_in_line_chart: bool :param compress_y_axis: Compress the y-axis to fit the values :type compress_y_axis: bool :param show_top_values_first: When using a Pie Chart: Display the top five (5) values and group all other values into one extra slice When using any other type of chart: Show the top four (4) values from the dataset instead of the first four values :type show_top_values_first: bool :return: The path to the image or the response from the server if there is an error :rtype: dict """ # Check if the object name is set correctly if not object_name: raise ValueError("The object_name is a required parameter") # Check if the length of the object name is between 1 and 15 characters if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Validate that a chart type is set: if chart_type is None: raise ValueError("The chart_type is a required parameter") # Check that draw_points_in_line_chart is only working if the chart type is line chart if draw_points_in_line_chart and chart_type != enums.ChartType.LINE_CHART: raise ValueError( "The parameter draw_points_in_line_chart is only supported for " "enums.ChartType.LINE_CHART" ) # Build the query parameters query_parameter = self._base_parameter | { "name": object_name, "area": object_location.value, "enums.ChartType": chart_type.value, "drawpoints": str(draw_points_in_line_chart), "zoom": image_size.value, "focus": str(compress_y_axis), "tops": str(show_top_values_first), "format": "png", } # Build the query path query_path = self._service_path + "/chart2result" # Download the image return await tools.get_database_response(query_path, query_parameter) async def chart2table( self, object_name: str, # Selection Specifier object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Chart settings: chart_type: enums.ChartType = enums.ChartType.LINE_CHART, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, draw_points_in_line_chart: bool = False, compress_y_axis: bool = False, show_top_values_first: bool = False, time_slices: int = None, ) -> dict: """Download a graph for a table The image of the graph will be downloaded into a temporary directory and the path to the image will be 
returned :param object_name: The identifier of the table [required, 1-15 characters] :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: str, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None` :type updated_after: datetime, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param chart_type: The type of chart which shall be downloaded, defaults to :attr:`~enums.ChartType.LINE_CHART` :type chart_type: enums.ChartType, optional :param image_size: The size of the image which shall be downloaded, defaults to :attr:`~enums.ImageSize.LEVEL3` :type image_size: enums.ImageSize, optional :param draw_points_in_line_chart: Highlight the data points in a line chart, only allowed if chart_type is :attr:`enums.ChartType.LINE_CHART` :type draw_points_in_line_chart: bool, optional :param compress_y_axis: Compress the y-axis to fit the values :type compress_y_axis: bool, optional :param show_top_values_first: When using :attr:`enums.ChartType.PIE_CHART` as chart_type: Display the top five (5) values as single slices and group all other slices into one other slice. When using any other :class:`enums.ChartType`: Display the top four (4) values instead of the first four (4) values :type show_top_values_first: bool, optional :param time_slices: The number of time slices into which the data shall be accumulated :type time_slices: int, optional :return: The path to the image or the file downloaded from the server. 
:rtype: dict """ # Check if the table name was set correctly if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Check if any illegal parameter combination was set if draw_points_in_line_chart and chart_type is not enums.ChartType.LINE_CHART: raise ValueError( "The parameter draw_points_in_line_chart is only supported for " "enums.ChartType.LINE_CHART" ) # Convert the times to string _time_string = ( None if updated_after is None else updated_after.strftime("%d.%m.%Y %H:%M:%Sh") ) # Build the query parameters query_parameter = self._base_parameter | { "name": object_name, "area": object_location.value, "enums.ChartType": chart_type.value, "drawpoints": str(draw_points_in_line_chart), "zoom": image_size.value, "focus": str(compress_y_axis), "tops": str(show_top_values_first), "startyear": start_year, "endyear": end_year, "timeslices": time_slices, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "format": "png", "stand": _time_string, } # Build the query path query_path = self._service_path + "/chart2table" # Download the image return await tools.get_database_response(query_path, query_parameter) async def chart2timeseries( self, object_name: str, # Selection Specifier contents: typing.Optional[list[str]] = None, object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Chart settings: chart_type: enums.ChartType = enums.ChartType.LINE_CHART, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, draw_points_in_line_chart: bool = False, compress_y_axis: bool = False, show_top_values_first: bool = False, time_slices: int = None, ) -> dict: """Download a graph for a timeseries The image of the graph will be downloaded into a temporary directory and the path to the image will be returned :param object_name: The identifier of the timeseries :type object_name: str :param contents: The names of the values which shall be in the chart :type contents: list[str], optional :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: enums.ObjectStorage, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None` :type updated_after: datetime, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param 
region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param chart_type: The type of chart which shall be downloaded, defaults to :py:enum:mem:`~enums.ChartType.LINE_CHART` :type chart_type: enums.ChartType, optional :param image_size: The size of the image which shall be downloaded, defaults to :py:enum:mem:`~enums.ImageSize.LEVEL_3` :type image_size: enums.ImageSize, optional :param draw_points_in_line_chart: Highlight the data points in a line chart, only allowed if chart_type is :attr:`enums.ChartType.LINE_CHART` :type draw_points_in_line_chart: bool, optional :param compress_y_axis: Compress the y-axis to fit the values :type compress_y_axis: bool, optional :param show_top_values_first: When using :py:enum:mem:`~enums.ChartType.PIE_CHART` as chart_type: Display the top five (5) values as single slices and group all other slices into one other slice. When using any other :enum:`~enums.ChartType`: Display the top four (4) values instead of the first four (4) values :type show_top_values_first: bool, optional :param time_slices: The number of time slices into which the data shall be accumulated :type time_slices: int, optional :return: The path to the image or the file downloaded from the server. 
:rtype: dict """ # Check if the table name was set correctly if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Check if any illegal parameter combination was set if draw_points_in_line_chart and chart_type is not enums.ChartType.LINE_CHART: raise ValueError( "The parameter draw_points_in_line_chart is only supported for " "enums.ChartType.LINE_CHART" ) # Convert the times to string _time_string = ( None if updated_after is None else updated_after.strftime("%d.%m.%Y %H:%M:%Sh") ) # Build the query parameters query_parameter = self._base_parameter | { "name": object_name, "area": object_location.value, "enums.ChartType": chart_type.value, "drawpoints": str(draw_points_in_line_chart), "zoom": image_size.value, "focus": str(compress_y_axis), "tops": str(show_top_values_first), "contents": ",".join(contents) if contents is not None else None, "startyear": start_year, "endyear": end_year, "timeslices": time_slices, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "format": "png", "stand": _time_string, } # Build the query path query_path = self._service_path + "/chart2timeseries" # Download the image return await tools.get_database_response(query_path, query_parameter) async def cube( self, object_name: str, # Selection Specifier contents: typing.Optional[list[str]] = None, object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Cube settings values: bool = True, metadata: bool = True, additional_metadata: bool = False, time_slices: int = None, ) -> dict: """Get a datacube embedded into an dictionary This method requires the "premium" access to the database. 
:param object_name: The identifier of the data cube :type object_name: str :param contents: The names of the values which shall be in the chart :type contents: list[str], optional :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.GENESISObjectLocation.ALL` :type object_location: str, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None` :type updated_after: datetime, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param values: Should values be returned, defaults to `True` :type values: bool, optional :param metadata: Should metadata be returned, defaults to `True` :type metadata: bool, optional :param additional_metadata: Should additional metadata be returned, defaults to `False` :type additional_metadata: bool, optional :param time_slices: The number of time slices into which the data shall be accumulated :type time_slices: int, optional :return: The csv embedded in the response body :rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Convert the times to string _time_string = ( None if updated_after is None else updated_after.strftime("%d.%m.%Y %H:%M:%Sh") ) # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "contents": ",".join(contents) if contents is not None else None, "startyear": start_year, "endyear": end_year, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, 
"classifyingkey3": classifying_key_3, "format": "csv", "stand": _time_string, "values": str(values), "metadata": str(metadata), "additionals": str(additional_metadata), } # Build the query path query_path = self._service_path + "/cube" # Download the file return await tools.get_database_response(query_path, query_parameters) async def cube_file( self, object_name: str, # Selection Specifier contents: typing.Optional[list[str]] = None, object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Cube settings values: bool = True, metadata: bool = True, additional_metadata: bool = False, time_slices: int = None, ): """Download a data cube as csv-file (seperator: `;`) This method requires the "premium" access to the database. :param object_name: The identifier of the data cube :type object_name: str :param contents: The names of the values which shall be in the chart :type contents: list[str], optional :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: enums.ObjectStorage, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None` :type updated_after: datetime, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart, defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, 
optional :param values: Should values be returned, defaults to `True` :type values: bool, optional :param metadata: Should metadata be returned, defaults to `True` :type metadata: bool, optional :param additional_metadata: Should additional metadata be returned, defaults to `False` :type additional_metadata: bool, optional :param time_slices: The number of time slices into which the data shall be accumulated :type time_slices: int, optional :return: The csv embedded in the response body :rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Convert the times to string _time_string = ( None if updated_after is None else updated_after.strftime("%d.%m.%Y %H:%M:%Sh") ) # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "contents": ",".join(contents) if contents is not None else None, "startyear": start_year, "endyear": end_year, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "format": "csv", "stand": _time_string, "values": str(values), "metadata": str(metadata), "additionals": str(additional_metadata), } # Build the query path query_path = self._service_path + "/cubefile" # Download the file return await tools.get_database_response(query_path, query_parameters) async def map2result( self, object_name: str, object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, number_of_distinction_classes: typing.Optional[int] = 5, classify_by_same_value_range: typing.Optional[bool] = True, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, ): """Download a map displaying the values of the specified result table :param object_name: The identifier of the data cube :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: enums.ObjectStorage, optional :param number_of_distinction_classes: The number of distinction classes to be generated, defaults to 5 :type number_of_distinction_classes: int, optional :param classify_by_same_value_range: If this is set to `True`, the distinction classes have the same size. If this is set to `False` the distinction classes have different sizes, but the same amount of values in them. Defaults to `True` :type classify_by_same_value_range: bool, optional :param image_size: The size of the image which shall be downloaded, defaults to :py:enum:mem:`~enums.ImageSize.LEVEL_3` :type image_size: enums.ImageSize, optional :return: The path to the image or the file downloaded from the server. 
:rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") if not (2 <= number_of_distinction_classes <= 5): raise ValueError("The number of distinction classes need to be between 2 and 5") # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "mapType": 0, "classes": number_of_distinction_classes, "classification": int(classify_by_same_value_range), "zoom": image_size.value, "format": "png", } # Build the query path query_path = self._service_path + "/map2result" # Download the file return await tools.get_database_response(query_path, query_parameters) async def map2table( self, object_name: str, # Selection Specifier object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Map Settings number_of_distinction_classes: typing.Optional[int] = 5, classify_by_same_value_range: typing.Optional[bool] = True, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, ): """ Download a map visualizing the selected data from the table :param object_name: The identifier of the table [required, 1-15 characters] :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: str, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the 
classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param number_of_distinction_classes: The number of distinction classes to be generated, defaults to 5 :type number_of_distinction_classes: int, optional :param classify_by_same_value_range: If this is set to `True`, the distinction classes have the same size. If this is set to `False` the distinction classes have different sizes, but the same amount of values in them. Defaults to `True` :type classify_by_same_value_range: bool, optional :param image_size: The size of the image which shall be downloaded, defaults to :py:enum:mem:`~enums.ImageSize.LEVEL_3` :type image_size: enums.ImageSize, optional :return: The path to the image or the file downloaded from the server. :rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") if not (2 <= number_of_distinction_classes <= 5): raise ValueError("The number of distinction classes need to be between 2 and 5") # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "mapType": 0, "classes": number_of_distinction_classes, "classification": int(classify_by_same_value_range), "zoom": image_size.value, "startyear": start_year, "endyear": end_year, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "format": "png", } # Build the query path query_path = self._service_path + "/map2table" # Download the file return await tools.get_database_response(query_path, query_parameters) async def map2timeseries( self, object_name: str, # Selection Specifier object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Map Settings number_of_distinction_classes: typing.Optional[int] = 5, classify_by_same_value_range: typing.Optional[bool] = True, image_size: enums.ImageSize = enums.ImageSize.LEVEL_3, ): """ Download a map visualizing the selected data from the table :param object_name: The identifier of the table [required, 1-15 characters] :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: str, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None` :type updated_after: datetime, optional :param start_year: Data starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: Data after this 
year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param number_of_distinction_classes: The number of distinction classes to be generated, defaults to 5 :type number_of_distinction_classes: int, optional :param classify_by_same_value_range: If this is set to `True`, the distinction classes have the same size. If this is set to `False` the distinction classes have different sizes, but the same amount of values in them. Defaults to `True` :type classify_by_same_value_range: bool, optional :param image_size: The size of the image which shall be downloaded, defaults to :py:enum:mem:`~enums.ImageSize.LEVEL_3` :type image_size: enums.ImageSize, optional :return: The path to the image or the file downloaded from the server. 
:rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") if not (2 <= number_of_distinction_classes <= 5): raise ValueError("The number of distinction classes need to be between 2 and 5") # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "mapType": 0, "classes": number_of_distinction_classes, "classification": int(classify_by_same_value_range), "zoom": image_size.value, "startyear": start_year, "endyear": end_year, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "format": "png", } # Build the query path query_path = self._service_path + "/map2timeseries" # Download the file return await tools.get_database_response(query_path, query_parameters) async def result( self, object_name: str, object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, remove_empty_rows: bool = False, ): """ Get the contents of the result table embedded in the response :param object_name: The identifier of the table [required, 1-15 characters] :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: str, optional :param remove_empty_rows: Remove empty rows from the embedded CSV-file :type remove_empty_rows: bool, optional :return: Dictionary containing the response :rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Build query parameters query_parameter = self._base_parameter | { "name": object_name, "area": object_location.value, "compress": str(remove_empty_rows), } query_path = self._service_path + "/result" return await tools.get_database_response(query_path, query_parameter) async def table( self, object_name: str, # Selection Specifier object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers classifying_code_1: typing.Optional[str] = None, classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_2: typing.Optional[str] = None, classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None, classifying_code_3: typing.Optional[str] = None, classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None, # Output Selection generate_job: bool = False, remove_emtpy_rows: bool = False, switch_rows_and_columns: bool = False, ): """ Download a table by embedding it into the JSON Response :param object_name: The identifier of the table [required, 1-15 characters] :type object_name: str :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL` :type object_location: str, optional :param updated_after: Time after which the table needs to have been updated to be returned, defaults to 
:attr:`None` :type updated_after: datetime, optional :param start_year: DataAPIWrapper starting from this year will be selected for the chart , defaults to :attr:`None` :type start_year: str, optional :param end_year: DataAPIWrapper after this year will be excluded for the chart, defaults to :attr:`None` :type end_year: str, optional :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None` :type region_code: str, optional :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken from, defaults to :attr:`None` :type region_key: str, optional :param classifying_code_1: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_1: str, optional :param classifying_key_1: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_1: str, optional :param classifying_code_2: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_2: str, optional :param classifying_key_2: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_2: str, optional :param classifying_code_3: Code of the classificator which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_code_3: str, optional :param classifying_key_3: Code of the classificator value which shall be used to limit the data selection further, defaults to :attr:`None` :type classifying_key_3: str, optional :param generate_job: Generate a Job if the table cannot be pulled directly, defaults to ``False`` :type generate_job: bool :param remove_emtpy_rows: Remove all empty data rows from the response, defaults to ``False`` :type remove_emtpy_rows: bool :param switch_rows_and_columns: Switch the rows and columns in the response, defaults to ``False`` :type switch_rows_and_columns: bool :return: The specified table data embedded in the response data :rtype: dict """ if not object_name: raise ValueError("The object_name is a required parameter") if not (1 <= len(object_name.strip()) <= 15): raise ValueError("The object_name may only contain between 1 and 15 characters") # Build the query parameters query_parameters = self._base_parameter | { "name": object_name, "area": object_location.value, "compress": str(remove_emtpy_rows), "transpose": str(switch_rows_and_columns), "startyear": start_year, "endyear": end_year, "regionalvariable": region_code, "regionalkey": region_key, "classifyingvariable1": classifying_code_1, "classifyingkey1": classifying_key_1, "classifyingvariable2": classifying_code_2, "classifyingkey2": classifying_key_2, "classifyingvariable3": classifying_code_3, "classifyingkey3": classifying_key_3, "job": str(generate_job), } # Build the query path query_path = self._service_path + "/table" # Get the response return await tools.get_database_response(query_path, query_parameters) async def tablefile( self, object_name: str, # Selection Specifier object_location: enums.ObjectStorage = enums.ObjectStorage.ALL, updated_after: typing.Optional[datetime.datetime] = None, start_year: typing.Optional[str] = None, end_year: typing.Optional[str] = None, region_code: typing.Optional[str] = None, region_key: typing.Optional[str] = None, # DataAPIWrapper Classifiers 
        classifying_code_1: typing.Optional[str] = None,
        classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_2: typing.Optional[str] = None,
        classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_3: typing.Optional[str] = None,
        classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None,
        # Output selection
        generate_job: bool = False,
        remove_empty_rows: bool = False,
        switch_rows_and_columns: bool = False,
        file_format: enums.FileFormat = enums.FileFormat.CSV,
    ):
        """Download a table as a file

        :param object_name: The identifier of the table [required, 1-15 characters]
        :type object_name: str
        :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL`
        :type object_location: str, optional
        :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None`
        :type updated_after: datetime, optional
        :param start_year: Data starting from this year will be selected for the chart, defaults to :attr:`None`
        :type start_year: str, optional
        :param end_year: Data after this year will be excluded for the chart, defaults to :attr:`None`
        :type end_year: str, optional
        :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None`
        :type region_code: str, optional
        :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken, defaults to :attr:`None`
        :type region_key: str, optional
        :param classifying_code_1: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_1: str, optional
        :param classifying_key_1: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_1: str, optional
        :param classifying_code_2: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_2: str, optional
        :param classifying_key_2: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_2: str, optional
        :param classifying_code_3: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_3: str, optional
        :param classifying_key_3: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_3: str, optional
        :param generate_job: Generate a job if the table cannot be pulled directly, defaults to ``False``
        :type generate_job: bool
        :param remove_empty_rows: Remove all empty data rows from the response, defaults to ``False``
        :type remove_empty_rows: bool
        :param switch_rows_and_columns: Switch the rows and columns in the response, defaults to ``False``
        :type switch_rows_and_columns: bool
        :param file_format: The file format which shall be returned, defaults to :py:enum:mem:`~enums.FileFormat.CSV`
        :type file_format: enums.FileFormat
        :return: The specified table data embedded in the response data
        :rtype: dict
        """
        if not object_name:
            raise ValueError("The object_name is a required parameter")
        if not (1 <= len(object_name.strip()) <= 15):
            raise ValueError("The object_name may only contain between 1 and 15 characters")
        # Build the query parameters
        query_parameters = self._base_parameter | {
            "name": object_name,
            "area": object_location.value,
            "compress": str(remove_empty_rows),
            "transpose": str(switch_rows_and_columns),
            "startyear": start_year,
            "endyear": end_year,
            "regionalvariable": region_code,
            "regionalkey": region_key,
            "classifyingvariable1": classifying_code_1,
            "classifyingkey1": classifying_key_1,
            "classifyingvariable2": classifying_code_2,
            "classifyingkey2": classifying_key_2,
            "classifyingvariable3": classifying_code_3,
            "classifyingkey3": classifying_key_3,
            "job": str(generate_job),
            "format": file_format.value,
        }
        # Build the query path
        query_path = self._service_path + "/tablefile"
        # Get the response
        return await tools.get_database_response(query_path, query_parameters)

    async def timeseries(
        self,
        object_name: str,
        # Selection specifiers
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        updated_after: typing.Optional[datetime.datetime] = None,
        start_year: typing.Optional[str] = None,
        end_year: typing.Optional[str] = None,
        region_code: typing.Optional[str] = None,
        region_key: typing.Optional[str] = None,
        # Data classifiers
        classifying_code_1: typing.Optional[str] = None,
        classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_2: typing.Optional[str] = None,
        classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_3: typing.Optional[str] = None,
        classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None,
        # Output selection
        generate_job: bool = False,
        remove_empty_rows: bool = False,
        switch_rows_and_columns: bool = False,
    ):
        """Download a timeseries embedded into a JSON response

        :param object_name: The identifier of the table [required, 1-15 characters]
        :type object_name: str
        :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL`
        :type object_location: str, optional
        :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None`
        :type updated_after: datetime, optional
        :param start_year: Data starting from this year will be selected for the chart, defaults to :attr:`None`
        :type start_year: str, optional
        :param end_year: Data after this year will be excluded for the chart, defaults to :attr:`None`
        :type end_year: str, optional
        :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None`
        :type region_code: str, optional
        :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken, defaults to :attr:`None`
        :type region_key: str, optional
        :param classifying_code_1: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_1: str, optional
        :param classifying_key_1: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_1: str, optional
        :param classifying_code_2: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_2: str, optional
        :param classifying_key_2: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_2: str, optional
        :param classifying_code_3: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_3: str, optional
        :param classifying_key_3: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_3: str, optional
        :param generate_job: Generate a job if the table cannot be pulled directly, defaults to ``False``
        :type generate_job: bool
        :param remove_empty_rows: Remove all empty data rows from the response, defaults to ``False``
        :type remove_empty_rows: bool
        :param switch_rows_and_columns: Switch the rows and columns in the response, defaults to ``False``
        :type switch_rows_and_columns: bool
        :return: The specified table data embedded in the response data
        :rtype: dict
        """
        if not object_name:
            raise ValueError("The object_name is a required parameter")
        if not (1 <= len(object_name.strip()) <= 15):
            raise ValueError("The object_name may only contain between 1 and 15 characters")
        # Build the query parameters
        query_parameters = self._base_parameter | {
            "name": object_name,
            "area": object_location.value,
            "compress": str(remove_empty_rows),
            "transpose": str(switch_rows_and_columns),
            "startyear": start_year,
            "endyear": end_year,
            "regionalvariable": region_code,
            "regionalkey": region_key,
            "classifyingvariable1": classifying_code_1,
            "classifyingkey1": classifying_key_1,
            "classifyingvariable2": classifying_code_2,
            "classifyingkey2": classifying_key_2,
            "classifyingvariable3": classifying_code_3,
            "classifyingkey3": classifying_key_3,
            "job": str(generate_job),
        }
        # Build the query path
        query_path = self._service_path + "/timeseries"
        # Get the response
        return await tools.get_database_response(query_path, query_parameters)

    async def timeseriesfile(
        self,
        object_name: str,
        # Selection specifiers
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        updated_after: typing.Optional[datetime.datetime] = None,
        start_year: typing.Optional[str] = None,
        end_year: typing.Optional[str] = None,
        region_code: typing.Optional[str] = None,
        region_key: typing.Optional[str] = None,
        # Data classifiers
        classifying_code_1: typing.Optional[str] = None,
        classifying_key_1: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_2: typing.Optional[str] = None,
        classifying_key_2: typing.Optional[typing.Union[str, list[str]]] = None,
        classifying_code_3: typing.Optional[str] = None,
        classifying_key_3: typing.Optional[typing.Union[str, list[str]]] = None,
        # Output selection
        generate_job: bool = False,
        remove_empty_rows: bool = False,
        switch_rows_and_columns: bool = False,
    ):
        """Download a timeseries as a csv file

        :param object_name: The identifier of the table [required, 1-15 characters]
        :type object_name: str
        :param object_location: The location in which the table is stored, defaults to :py:enum:mem:`~enums.ObjectStorage.ALL`
        :type object_location: str, optional
        :param updated_after: Time after which the table needs to have been updated to be returned, defaults to :attr:`None`
        :type updated_after: datetime, optional
        :param start_year: Data starting from this year will be selected for the chart, defaults to :attr:`None`
        :type start_year: str, optional
        :param end_year: Data after this year will be excluded for the chart, defaults to :attr:`None`
        :type end_year: str, optional
        :param region_code: Code of the regional classifier which shall be used to limit the regional component of the data, defaults to :attr:`None`
        :type region_code: str, optional
        :param region_key: The official municipality key (AGS) specifying from which municipalities the data shall be taken, defaults to :attr:`None`
        :type region_key: str, optional
        :param classifying_code_1: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_1: str, optional
        :param classifying_key_1: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_1: str, optional
        :param classifying_code_2: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_2: str, optional
        :param classifying_key_2: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_2: str, optional
        :param classifying_code_3: Code of the classifier which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_code_3: str, optional
        :param classifying_key_3: Code of the classifier value which shall be used to limit the data selection further, defaults to :attr:`None`
        :type classifying_key_3: str, optional
        :param generate_job: Generate a job if the table cannot be pulled directly, defaults to ``False``
        :type generate_job: bool
        :param remove_empty_rows: Remove all empty data rows from the response, defaults to ``False``
        :type remove_empty_rows: bool
        :param switch_rows_and_columns: Switch the rows and columns in the response, defaults to ``False``
        :type switch_rows_and_columns: bool
        :return: The specified table data embedded in the response data
        :rtype: dict
        """
        if not object_name:
            raise ValueError("The object_name is a required parameter")
        if not (1 <= len(object_name.strip()) <= 15):
            raise ValueError("The object_name may only contain between 1 and 15 characters")
        # Build the query parameters
        query_parameters = self._base_parameter | {
            "name": object_name,
            "area": object_location.value,
            "compress": str(remove_empty_rows),
            "transpose": str(switch_rows_and_columns),
            "startyear": start_year,
            "endyear": end_year,
            "regionalvariable": region_code,
            "regionalkey": region_key,
            "classifyingvariable1": classifying_code_1,
            "classifyingkey1": classifying_key_1,
            "classifyingvariable2": classifying_code_2,
            "classifyingkey2": classifying_key_2,
            "classifyingvariable3": classifying_code_3,
            "classifyingkey3": classifying_key_3,
            "job": str(generate_job),
            "format": "csv",
        }
        # Build the query path
        query_path = self._service_path + "/timeseriesfile"
        # Get the response
        return await tools.get_database_response(query_path, query_parameters)
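
All of the selection methods above share one shape: validate object_name, merge the endpoint-specific query parameters into self._base_parameter, and hand the query path and parameters to tools.get_database_response. A minimal usage sketch, assuming the surrounding class is the DataAPIWrapper named in the comments and that it is constructed with API credentials; the constructor is not shown in this excerpt, so both the class name and its keyword arguments are assumptions:

import asyncio

async def main() -> None:
    # Hypothetical construction; the credential keyword names are assumptions.
    wrapper = DataAPIWrapper(username="user", password="secret")
    # "12411-0001" is an illustrative table identifier (1-15 characters).
    data = await wrapper.table("12411-0001", start_year="2015", end_year="2020")
    # The same selection rendered as a PNG map with four distinction classes.
    image = await wrapper.map2table("12411-0001", number_of_distinction_classes=4)
    print(data, image)

asyncio.run(main())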

File: ElectionsTools/cases/data.py (repo tgquintela/ElectionsTools, MIT license, Python, 5,656 bytes)
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Canonical candidacy labels mapped to the ballot-name variants they appear
# under in the provincial results.
cand_fus_prov = {
    'PSOE': ['PSE-EE (PSOE)', "PSOE", "PSC", "PSE-EE", "PSdeG-PSOE", "PSC-PSOE",
             "PSOE-NCa"],
    "PODEMOS": ["PODEMOS-AHAL DUGU", "PODEMOS", "PODEMOS-COMPROMÍS",
                "PODEMOS-En Marea-ANOVA-EU", "PODEMOS-Ahora Alto Aragón en Común",
                "PODEMOS - COMPROMIS", "PODEMOS-EN MAREA-ANOVA-EU", "EN COMÚ"],
    'EH Bildu': ['EH Bildu'], 'UPYD': ['UPYD'], "C's": ["C's", "C´s"],
    "PP": ["PP-PAR", "PP", "PP-FORO", "UPN-PP"],
    "EAJ-PNV": ["EAJ-PNV"], "PACMA": ["PACMA"],
    "IU": ["IU-UPeC", "UNIDAD POPULAR", "UPeC-IU-IZCA", "IU-UpeC", "IUC-UPeC",
           "IU-B-UPeC", "IU-CHA-UPeC", "UP-UPeC", "EUPV-UPeC", "UPeC-IU",
           "IULV-CA, UPeC", "IULV-CA,UPeC", "IU-UPeC-IAS", "UP: IULV-CA, UPeC",
           "UPB: IU-UPeC"],
    "RECORTES CERO-GRUPO VERDE": ["RECORTES CERO-GRUPO VERDE"],
    "CANARIAS DECIDE": ["CANARIAS DECIDE"], "PCPE": ["PCPE"],
    "Unió": ["Unio.Cat", "unio.cat"], "VOX": ["VOX"],
    "ERC": ["ERC-CATSI", "ERC-CATSÍ"], "+MAS+": ["+MAS+"],
    "Independientes-FIA": ["Independientes-FIA"],
    "FE de las JONS": ["FE de las JONS"], "PH": ["PH"], "OE": ["OE"],
    "Ln": ["Ln"], "CENTRO MODERADO": ["CENTRO MODERADO"], "P-LIB": ["P-LIB"],
    "FDEE": ["FDEE"], "ARAPV": ["ARAPV", "ARA, PV"], "SOMVAL": ["SOMVAL"],
    "PLD": ["PLD"], "PFyV": ["PFyV"], "PUM+J": ["PUM+J"], "MÉS": ["MÉS"],
    "EL PI": ["EL PI"], "SAIn": ["SAIn"], "CCD": ["CCD"], "DN": ["DN"],
    "EN POSITIU": ["EN POSITIU"], "DL": ["DL"], "mlgXSÍ": ["mlgXSÍ"],
    "PCPC": ["PCPC"], "EB": ["EB", "EB - AZ", "EB-AZ"], "AVANT": ["AVANT"],
    "SOLUCIONA": ["SOLUCIONA"], "X ESQ": ["X ESQ"], "PREPAL": ["PREPAL"],
    "GBAI": ["GBAI"], "CCa-PNC": ["CCa-PNC"],
    "X LA IZQUIERDA-LOS VERDES": ["X LA IZQUIERDA-LOS VERDES"],
    "PCOE": ["PCOE"], "EU-eX": ["EU-eX"], "IFem": ["IFem"], "NÓS": ["NÓS"],
    "PT": ["PT"], "CILUS": ["CILUS"], "AJU": ["AJU"], "CRA": ["CRA"],
    "EZKERRA": ["EZKERRA"], "JS,PC": ["JS,PC"],
}

# Same mapping at group level: PSC-PSOE and the PODEMOS coalitions are kept
# as separate groups here instead of being fused into one label.
grup_fus_prov = {
    'PSOE': ['PSE-EE (PSOE)', "PSOE", "PSE-EE", "PSdeG-PSOE", "PSC", "PSOE-NCa"],
    "PSC-PSOE": ["PSC-PSOE"],
    "PODEMOS": ["PODEMOS-AHAL DUGU", "PODEMOS",
                "PODEMOS-Ahora Alto Aragón en Común"],
    "EN COMÚ": ["EN COMÚ"],
    "PODEMOS-COMPROMÍS": ["PODEMOS-COMPROMÍS", "PODEMOS - COMPROMIS"],
    "PODEMOS-En Marea-ANOVA-EU": ["PODEMOS-En Marea-ANOVA-EU",
                                  "PODEMOS-EN MAREA-ANOVA-EU"],
    'EH Bildu': ['EH Bildu'], 'UPYD': ['UPYD'], "C's": ["C's", "C´s"],
    "PP": ["PP-PAR", "PP", "PP-FORO", "UPN-PP"],
    "EAJ-PNV": ["EAJ-PNV"], "PACMA": ["PACMA"],
    "IU": ["IU-UPeC", "UNIDAD POPULAR", "UPeC-IU-IZCA", "IU-UpeC", "IUC-UPeC",
           "IU-B-UPeC", "IU-CHA-UPeC", "UP-UPeC", "EUPV-UPeC", "UPeC-IU",
           "IULV-CA, UPeC", "IULV-CA,UPeC", "IU-UPeC-IAS", "UP: IULV-CA, UPeC",
           "UPB: IU-UPeC"],
    "RECORTES CERO-GRUPO VERDE": ["RECORTES CERO-GRUPO VERDE"],
    "CANARIAS DECIDE": ["CANARIAS DECIDE"], "PCPE": ["PCPE"],
    "Unió": ["Unio.Cat", "unio.cat"], "VOX": ["VOX"],
    "ERC": ["ERC-CATSI", "ERC-CATSÍ"], "+MAS+": ["+MAS+"],
    "Independientes-FIA": ["Independientes-FIA"],
    "FE de las JONS": ["FE de las JONS"], "PH": ["PH"], "OE": ["OE"],
    "Ln": ["Ln"], "CENTRO MODERADO": ["CENTRO MODERADO"], "P-LIB": ["P-LIB"],
    "FDEE": ["FDEE"], "ARAPV": ["ARAPV", "ARA, PV"], "SOMVAL": ["SOMVAL"],
    "PLD": ["PLD"], "PFyV": ["PFyV"], "PUM+J": ["PUM+J"], "MÉS": ["MÉS"],
    "EL PI": ["EL PI"], "SAIn": ["SAIn"], "CCD": ["CCD"], "DN": ["DN"],
    "EN POSITIU": ["EN POSITIU"], "DL": ["DL"], "mlgXSÍ": ["mlgXSÍ"],
    "PCPC": ["PCPC"], "EB": ["EB", "EB - AZ", "EB-AZ"], "AVANT": ["AVANT"],
    "SOLUCIONA": ["SOLUCIONA"], "X ESQ": ["X ESQ"], "PREPAL": ["PREPAL"],
    "GBAI": ["GBAI"], "CCa-PNC": ["CCa-PNC"],
    "X LA IZQUIERDA-LOS VERDES": ["X LA IZQUIERDA-LOS VERDES"],
    "PCOE": ["PCOE"], "EU-eX": ["EU-eX"], "IFem": ["IFem"], "NÓS": ["NÓS"],
    "PT": ["PT"], "CILUS": ["CILUS"], "AJU": ["AJU"], "CRA": ["CRA"],
    "EZKERRA": ["EZKERRA"], "JS,PC": ["JS,PC"],
}

File: tests/test_replaces.py (repo stscoundrel/sanakirju-simplifier, MIT license, Python, 1,094 bytes, 5 stars)
from src.sanakirju_simpilifier import replaces, reader


def test_replaces_simple_content_in_xml_files() -> None:
    original_files = reader.read_xml_files()
    results = replaces.simplify_files(original_files)

    # Original files should contain replaceable bits.
    for _, contents in original_files:
        assert "</RangeOfApplication>" in contents
        assert "</Fragment>" in contents

    # Result should not contain replaceable bits.
    for _, contents in results:
        assert "</RangeOfApplication>" not in contents
        assert "</Fragment>" not in contents


def test_replaces_regex_content_in_xml_files() -> None:
    original_files = reader.read_xml_files()
    results = replaces.simplify_files(original_files)

    # Original files should contain regexable bits.
    for _, contents in original_files:
        assert "<RangeOfApplication" in contents
        assert "<Fragment" in contents

    # Result should not contain regexable bits.
    for _, contents in results:
        assert "<RangeOfApplication" not in contents
        assert "<Fragment" not in contents
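
The tests only pin down replaces.simplify_files through its observable effect: the RangeOfApplication and Fragment tags are gone from the returned contents while the (name, contents) pair shape is preserved. A minimal sketch of a function with that behaviour, under the assumption of the same pair shape; the real implementation in src.sanakirju_simpilifier.replaces may differ:

import re

def simplify_files(files):
    # Strip the opening tags (which may carry attributes) and the closing tags.
    def strip_tags(contents):
        for tag in ("RangeOfApplication", "Fragment"):
            contents = re.sub(rf"<{tag}[^>]*>", "", contents)
            contents = contents.replace(f"</{tag}>", "")
        return contents
    return [(name, strip_tags(contents)) for name, contents in files]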

File: test_python_toolbox/test_context_management/test_context_manager.py (repo hboshnak/python_toolbox, MIT license, Python, 27,080 bytes, 119 stars)
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.

from __future__ import generator_stop

from python_toolbox import cute_testing
from python_toolbox.context_management import (ContextManager,
                                               ContextManagerType,
                                               SelfHook)

flag = None
exception_type_caught = None


def test_generator():
    '''Test a context manager made from a generator.'''
    @ContextManagerType
    def MyContextManager(value):
        global flag, exception_type_caught
        former_value = flag
        flag = value
        try:
            yield
        finally:
            flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=False)


def test_error_catching_generator():
    '''Test an error-catching context manager made from a generator.'''
    @ContextManagerType
    def MyContextManager(value):
        global flag, exception_type_caught
        former_value = flag
        flag = value
        try:
            yield
        except Exception as exception:
            exception_type_caught = type(exception)
        finally:
            flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=True)


def test_self_returning_generator():
    '''Test a self-returning context manager made from a generator.'''
    @ContextManagerType
    def MyContextManager(value):
        global flag, exception_type_caught
        former_value = flag
        flag = value
        try:
            yield SelfHook
        finally:
            flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=False)


def test_self_returning_error_catching_generator():
    '''
    Test a self-returning error-catching context manager made from a generator.
    '''
    @ContextManagerType
    def MyContextManager(value):
        global flag, exception_type_caught
        former_value = flag
        flag = value
        try:
            yield SelfHook
        except Exception as exception:
            exception_type_caught = type(exception)
        finally:
            flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_manage_context():
    '''Test a context manager that uses a `manage_context` method.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=False)


def test_error_catching_manage_context():
    '''Test an error-catching `manage_context`-powered context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield
            except Exception as exception:
                exception_type_caught = type(exception)
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=True)


def test_self_returning_manage_context():
    '''Test a self-returning `manage_context`-powered context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag
            former_value = flag
            flag = self.value
            try:
                yield self
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=False)


def test_self_returning_error_catching_manage_context():
    '''
    Test a self-returning error-catching `manage_context` context manager.
    '''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield self
            except Exception as exception:
                exception_type_caught = type(exception)
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_manage_context_overriding_generator():
    '''
    Test a `manage_context` context manager overriding one made from generator.
    '''
    @ContextManagerType
    def MyBaseContextManager(value):
        raise Exception('This code is supposed to be overridden.')
        yield

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield self
            except Exception as exception:
                exception_type_caught = type(exception)
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_manage_context_overriding_manage_context():
    '''
    Test a `manage_context`-powered context manager overriding another one.
    '''
    class MyBaseContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            raise Exception('This code is supposed to be overridden.')
            yield

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield self
            except Exception as exception:
                exception_type_caught = type(exception)
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_manage_context_overriding_enter_exit():
    '''
    Test `manage_context` context manager overriding one made from enter/exit.
    '''
    class MyBaseContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            raise Exception('This code is supposed to be overridden.')

        def __exit__(self, exc_type, exc_value, exc_traceback):
            raise Exception('This code is supposed to be overridden.')

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            global flag, exception_type_caught
            former_value = flag
            flag = self.value
            try:
                yield self
            except Exception as exception:
                exception_type_caught = type(exception)
            finally:
                flag = former_value

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_enter_exit():
    '''Test an enter/exit context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag
            flag = self._former_values.pop()

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=False)


def test_error_catching_enter_exit():
    '''Test an error-catching enter/exit context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    check_context_manager_type(MyContextManager,
                               self_returning=False,
                               error_catching=True)


def test_self_returning_enter_exit():
    '''Test a self-returning enter/exit context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag
            flag = self._former_values.pop()

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=False)


def test_error_catching_self_returning_enter_exit():
    '''Test an error-catching self-returning enter/exit context manager.'''
    class MyContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_enter_exit_overriding_generator():
    '''
    Test an enter/exit context manager overriding one made from generator.
    '''
    @ContextManagerType
    def MyBaseContextManager(value):
        raise Exception('This code is supposed to be overridden.')
        yield

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_enter_exit_overriding_manage_context():
    '''
    Test enter/exit context manager overriding one made from `manage_context`.
    '''
    class MyBaseContextManager(ContextManager):
        def __init__(self, value):
            self.value = value

        def manage_context(self):
            raise Exception('This code is supposed to be overridden.')
            yield

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_enter_exit_overriding_enter_exit():
    '''Test an enter/exit context manager overriding another one.'''
    class MyBaseContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            raise Exception('This code is supposed to be overridden.')

        def __exit__(self, exc_type, exc_value, exc_traceback):
            raise Exception('This code is supposed to be overridden.')

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_enter_subclassing_exit():
    '''
    Test one defining `__enter__` subclassing from one that defines `__exit__`.
    '''
    class MyBaseContextManager(ContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __exit__(self, exc_type, exc_value, exc_traceback):
            global flag, exception_type_caught
            flag = self._former_values.pop()
            if exc_type:
                exception_type_caught = exc_type
                return True

    class MyContextManager(MyBaseContextManager):
        def __init__(self, value):
            self.value = value
            self._former_values = []

        def __enter__(self):
            global flag
            self._former_values.append(flag)
            flag = self.value
            return self

    check_context_manager_type(MyContextManager,
                               self_returning=True,
                               error_catching=True)


def test_exit_subclassing_enter():
    '''
    Test one defining `__exit__` subclassing from one that defines `__enter__`.
''' class MyBaseContextManager(ContextManager): def __init__(self, value): self.value = value self._former_values = [] def __enter__(self): global flag self._former_values.append(flag) flag = self.value return self class MyContextManager(MyBaseContextManager): def __init__(self, value): self.value = value self._former_values = [] def __exit__(self, exc_type, exc_value, exc_traceback): global flag, exception_type_caught flag = self._former_values.pop() if exc_type: exception_type_caught = exc_type return True check_context_manager_type(MyContextManager, self_returning=True, error_catching=True) def check_context_manager_type(context_manager_type, self_returning, error_catching): ''' Run checks on a context manager. `self_returning` is a flag saying whether the context manager's `__enter__` method returns itself. (For the `as` keyword after `with`.) `error_catching` says whether the context manager catches exceptions it gets and updates the `exception_type_caught` global. ''' global flag, exception_type_caught assert flag is None assert exception_type_caught is None ### Testing simple case: ################################################## # # with context_manager_type(7) as return_value: assert flag == 7 if self_returning: assert isinstance(return_value, context_manager_type) else: # self_returning is False assert return_value is None # # ### Finished testing simple case. ######################################### assert flag is None assert exception_type_caught is None ### Testing creating context manager before `with`: ####################### # # my_context_manager = context_manager_type(1.1) assert isinstance(my_context_manager, context_manager_type) with my_context_manager as return_value: assert flag == 1.1 if self_returning: assert return_value is my_context_manager else: # self_returning is False assert return_value is None # # ### Finished testing creating context manager before `with`. ############## assert flag is None assert exception_type_caught is None ### Testing decorated function: ########################################### # # @context_manager_type('meow') def f(): assert flag == 'meow' f() assert flag is None assert exception_type_caught is None # # ### Finished testing decorated function. ################################## ### Testing manually decorated function: ################################## # # def g(a, b=2, **kwargs): assert flag == 'meow' new_g = context_manager_type('meow')(g) with cute_testing.RaiseAssertor(AssertionError): g('whatever') assert flag is None assert exception_type_caught is None new_g('whatever') assert flag is None assert exception_type_caught is None cute_testing.assert_polite_wrapper(new_g, g) # # ### Finished testing manually decorated function. ######################### ### Testing deep nesting: ################################################# # # my_context_manager = context_manager_type(123) assert flag is None with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 assert flag == 123 assert flag == 123 assert flag == 123 assert flag == 123 assert flag is None with context_manager_type(1) as return_value_1: assert flag == 1 with context_manager_type(2) as return_value_2: assert flag == 2 with return_value_1 or context_manager_type(1): assert flag == 1 assert flag == 2 assert flag == 1 assert flag is None # # ### Finished testing deep nesting. 
######################################## ########################################################################### ########################################################################### ### Now while raising exceptions: ### Testing simple case: ################################################## # # try: with context_manager_type(7) as return_value: assert flag == 7 if self_returning: assert isinstance(return_value, context_manager_type) else: # self_returning is False assert return_value is None raise TypeError('ooga booga') except Exception as exception: assert not error_catching assert type(exception) is TypeError else: assert error_catching assert exception_type_caught is TypeError exception_type_caught = None # # ### Finished testing simple case. ######################################### assert flag is None ### Testing creating context manager before `with`: ####################### # # my_context_manager = context_manager_type(1.1) assert isinstance(my_context_manager, context_manager_type) try: with my_context_manager as return_value: assert flag == 1.1 if self_returning: assert return_value is my_context_manager else: # self_returning is False assert return_value is None {}[3] except Exception as exception: assert not error_catching assert exception_type_caught is None assert type(exception) is KeyError else: assert error_catching assert exception_type_caught is KeyError exception_type_caught = None # # ### Finished testing creating context manager before `with`. ############## assert flag is None assert exception_type_caught is None ### Testing decorated function: ########################################### # # @context_manager_type('meow') def f(): assert flag == 'meow' 1/0 try: f() except Exception as exception: assert not error_catching assert exception_type_caught is None assert type(exception) is ZeroDivisionError else: assert error_catching assert exception_type_caught is ZeroDivisionError exception_type_caught = None # # ### Finished testing decorated function. ################################## assert flag is None exception_type_caught = None ### Testing manually decorated function: ################################## # # def g(a, b=2, **kwargs): assert flag == 'meow' eval('Ooga booga I am a syntax error.') with cute_testing.RaiseAssertor(AssertionError): g('whatever') assert flag is None assert exception_type_caught is None new_g = context_manager_type('meow')(g) assert flag is None assert exception_type_caught is None cute_testing.assert_polite_wrapper(new_g, g) try: new_g('whatever') except Exception as exception: assert not error_catching assert exception_type_caught is None assert type(exception) is SyntaxError else: assert error_catching assert exception_type_caught is SyntaxError exception_type_caught = None # # ### Finished testing manually decorated function. 
######################## ### Testing deep nesting: ################################################# # # my_context_manager = context_manager_type(123) assert flag is None try: with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 with my_context_manager: assert flag == 123 raise LookupError assert flag == 123 assert flag == 123 assert flag == 123 assert flag == 123 except Exception as exception: assert not error_catching assert exception_type_caught is None assert type(exception) is LookupError else: assert error_catching assert exception_type_caught is LookupError exception_type_caught = None assert flag is None try: with context_manager_type(1) as return_value_1: assert flag == 1 with context_manager_type(2) as return_value_2: assert flag == 2 with return_value_1 or context_manager_type(1): assert flag == 1 raise NotImplementedError assert flag == 2 assert flag == 1 except Exception as exception: assert not error_catching assert exception_type_caught is None assert type(exception) is NotImplementedError else: assert error_catching assert exception_type_caught is NotImplementedError exception_type_caught = None assert flag is None # # ### Finished testing deep nesting. ########################################
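For orientation, here is a minimal, self-contained sketch of the behaviour these checks exercise, written against the standard language protocol only; the `FlagSetter` name and module-level globals are illustrative, not part of the library under test:

flag = None
exception_type_caught = None

class FlagSetter:
    # Enter/exit-style context manager mirroring `MyContextManager` above:
    # it sets a global flag on entry, restores the previous value on exit,
    # and records and swallows any exception (the `error_catching=True`
    # behaviour checked by `check_context_manager_type`).
    def __init__(self, value):
        self.value = value
        self._former_values = []

    def __enter__(self):
        global flag
        self._former_values.append(flag)
        flag = self.value
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        global flag, exception_type_caught
        flag = self._former_values.pop()
        if exc_type:
            exception_type_caught = exc_type
            return True  # A truthy return tells Python to swallow the exception.

with FlagSetter(7):
    assert flag == 7
    raise TypeError('swallowed by __exit__')
assert flag is None
assert exception_type_caught is TypeError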
32.984166
79
0.54243
2,536
27,080
5.476735
0.059148
0.087695
0.083447
0.025344
0.919433
0.899129
0.875369
0.870401
0.849521
0.817625
0
0.006281
0.370938
27,080
820
80
33.02439
0.80904
0.147194
0
0.917688
0
0
0.019034
0
0
0
0
0
0.183888
1
0.14711
false
0
0.005254
0
0.215412
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
dc2d409b4b116fa166f20eef9672181262358155
105,119
py
Python
huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/sdrs_client.py
huaweicloud/huaweicloud-sdk-python-v3
7a6270390fcbf192b3882bf763e7016e6026ef78
[ "Apache-2.0" ]
64
2020-06-12T07:05:07.000Z
2022-03-30T03:32:50.000Z
huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/sdrs_client.py
huaweicloud/huaweicloud-sdk-python-v3
7a6270390fcbf192b3882bf763e7016e6026ef78
[ "Apache-2.0" ]
11
2020-07-06T07:56:54.000Z
2022-01-11T11:14:40.000Z
huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/sdrs_client.py
huaweicloud/huaweicloud-sdk-python-v3
7a6270390fcbf192b3882bf763e7016e6026ef78
[ "Apache-2.0" ]
24
2020-06-08T11:42:13.000Z
2022-03-04T06:44:08.000Z
# coding: utf-8 from __future__ import absolute_import import datetime import re import importlib import six from huaweicloudsdkcore.client import Client, ClientBuilder from huaweicloudsdkcore.exceptions import exceptions from huaweicloudsdkcore.utils import http_utils from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest class SdrsClient(Client): """ :param configuration: .Configuration object for this client :param pool_threads: The number of threads to use for async requests to the API. More threads means more concurrent API requests. """ PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types NATIVE_TYPES_MAPPING = { 'int': int, 'long': int if six.PY3 else long, 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } def __init__(self): super(SdrsClient, self).__init__() self.model_package = importlib.import_module("huaweicloudsdksdrs.v1.model") self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'} @classmethod def new_builder(cls, clazz=None): if clazz is None: return ClientBuilder(cls) if clazz.__name__ != "SdrsClient": raise TypeError("client type error, support client type is SdrsClient") return ClientBuilder(clazz) def add_protected_instance_nic(self, request): """保护实例添加网卡 给指定的保护实例添加网卡。 :param AddProtectedInstanceNicRequest request :return: AddProtectedInstanceNicResponse """ return self.add_protected_instance_nic_with_http_info(request) def add_protected_instance_nic_with_http_info(self, request): """保护实例添加网卡 给指定的保护实例添加网卡。 :param AddProtectedInstanceNicRequest request :return: AddProtectedInstanceNicResponse """ all_params = ['protected_instance_id', 'protected_instance_add_nic_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/nic', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='AddProtectedInstanceNicResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def add_protected_instance_tags(self, request): """添加保护实例标签 一个保护实例上最多有10个标签。此接口为幂等接口:创建时,如果创建的标签已经存在(key相同),则覆盖。 :param AddProtectedInstanceTagsRequest request :return: AddProtectedInstanceTagsResponse """ return self.add_protected_instance_tags_with_http_info(request) def add_protected_instance_tags_with_http_info(self, request): """添加保护实例标签 一个保护实例上最多有10个标签。此接口为幂等接口:创建时,如果创建的标签已经存在(key相同),则覆盖。 :param AddProtectedInstanceTagsRequest request :return: AddProtectedInstanceTagsResponse """ all_params = ['protected_instance_id', 'protected_instance_add_tags_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) 
collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/tags', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='AddProtectedInstanceTagsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def attach_protected_instance_replication(self, request): """保护实例挂载复制对 将指定的复制对挂载到指定的保护实例上。 :param AttachProtectedInstanceReplicationRequest request :return: AttachProtectedInstanceReplicationResponse """ return self.attach_protected_instance_replication_with_http_info(request) def attach_protected_instance_replication_with_http_info(self, request): """保护实例挂载复制对 将指定的复制对挂载到指定的保护实例上。 :param AttachProtectedInstanceReplicationRequest request :return: AttachProtectedInstanceReplicationResponse """ all_params = ['protected_instance_id', 'protected_instance_attach_replication_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/attachreplication', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='AttachProtectedInstanceReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def batch_add_tags(self, request): """批量添加保护实例标签 为指定保护实例批量添加或删除标签。一个资源上最多有10个标签。 此接口为幂等接口: 创建时如果请求体中存在重复key则报错。 创建时,不允许设置重复key数据,如果数据库已存在该key,就覆盖value的值。 :param BatchAddTagsRequest request :return: BatchAddTagsResponse """ return self.batch_add_tags_with_http_info(request) def batch_add_tags_with_http_info(self, request): """批量添加保护实例标签 为指定保护实例批量添加或删除标签。一个资源上最多有10个标签。 此接口为幂等接口: 创建时如果请求体中存在重复key则报错。 创建时,不允许设置重复key数据,如果数据库已存在该key,就覆盖value的值。 :param BatchAddTagsRequest request :return: BatchAddTagsResponse """ all_params = ['protected_instance_id', 'batch_add_or_delete_tags_request'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = 
local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/tags/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='BatchAddTagsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def batch_create_protected_instances(self, request): """批量创建保护实例 典型场景:没有特殊操作场景 接口功能:批量创建保护实例。保护实例创建完成后,系统默认容灾站点云服务器名称与生产站点云服务器名称相同,但ID不同。 :param BatchCreateProtectedInstancesRequest request :return: BatchCreateProtectedInstancesResponse """ return self.batch_create_protected_instances_with_http_info(request) def batch_create_protected_instances_with_http_info(self, request): """批量创建保护实例 典型场景:没有特殊操作场景 接口功能:批量创建保护实例。保护实例创建完成后,系统默认容灾站点云服务器名称与生产站点云服务器名称相同,但ID不同。 :param BatchCreateProtectedInstancesRequest request :return: BatchCreateProtectedInstancesResponse """ all_params = ['batch_create_protected_instances_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/batch', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='BatchCreateProtectedInstancesResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def batch_delete_protected_instances(self, request): """批量删除保护实例 典型场景:没有特殊操作场景 接口功能:批量删除保护实例。 :param BatchDeleteProtectedInstancesRequest request :return: BatchDeleteProtectedInstancesResponse """ return self.batch_delete_protected_instances_with_http_info(request) def batch_delete_protected_instances_with_http_info(self, request): """批量删除保护实例 典型场景:没有特殊操作场景 接口功能:批量删除保护实例。 :param BatchDeleteProtectedInstancesRequest request :return: BatchDeleteProtectedInstancesResponse """ all_params = ['batch_delete_protected_instances_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( 
resource_path='/v1/{project_id}/protected-instances/delete', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='BatchDeleteProtectedInstancesResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def batch_delete_tags(self, request): """批量删除保护实例标签 为指定保护实例批量删除标签。一个资源上最多有10个标签。 此接口为幂等接口: 删除时,如果删除的标签不存在,默认处理成功,删除时不对标签字符集范围做校验。删除时tags结构体不能缺失,key不能为空,或者空字符串。 :param BatchDeleteTagsRequest request :return: BatchDeleteTagsResponse """ return self.batch_delete_tags_with_http_info(request) def batch_delete_tags_with_http_info(self, request): """批量删除保护实例标签 为指定保护实例批量删除标签。一个资源上最多有10个标签。 此接口为幂等接口: 删除时,如果删除的标签不存在,默认处理成功,删除时不对标签字符集范围做校验。删除时tags结构体不能缺失,key不能为空,或者空字符串。 :param BatchDeleteTagsRequest request :return: BatchDeleteTagsResponse """ all_params = ['protected_instance_id', 'batch_add_or_delete_tags_request'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/tags/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='BatchDeleteTagsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def create_disaster_recovery_drill(self, request): """创建容灾演练 创建容灾演练。 :param CreateDisasterRecoveryDrillRequest request :return: CreateDisasterRecoveryDrillResponse """ return self.create_disaster_recovery_drill_with_http_info(request) def create_disaster_recovery_drill_with_http_info(self, request): """创建容灾演练 创建容灾演练。 :param CreateDisasterRecoveryDrillRequest request :return: CreateDisasterRecoveryDrillResponse """ all_params = ['create_disaster_recovery_drill_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/disaster-recovery-drills', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='CreateDisasterRecoveryDrillResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, 
request_type=request.__class__.__name__) def create_protected_instance(self, request): """创建保护实例 创建保护实例。保护实例创建完成后,系统默认容灾站点云服务器名称与生产站点云服务器名称相同,但ID不同。如果需要修改云服务器名称,请在保护实例详情页面单击云服务器名称,进入云服务器详情页面进行修改 :param CreateProtectedInstanceRequest request :return: CreateProtectedInstanceResponse """ return self.create_protected_instance_with_http_info(request) def create_protected_instance_with_http_info(self, request): """创建保护实例 创建保护实例。保护实例创建完成后,系统默认容灾站点云服务器名称与生产站点云服务器名称相同,但ID不同。如果需要修改云服务器名称,请在保护实例详情页面单击云服务器名称,进入云服务器详情页面进行修改 :param CreateProtectedInstanceRequest request :return: CreateProtectedInstanceResponse """ all_params = ['create_protected_instance_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='CreateProtectedInstanceResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def create_protection_group(self, request): """创建保护组 创建保护组。 说明: 本接口为异步接口,调用成功只是表示请求下发,创建结果需要通过“查询job状态”接口获取 :param CreateProtectionGroupRequest request :return: CreateProtectionGroupResponse """ return self.create_protection_group_with_http_info(request) def create_protection_group_with_http_info(self, request): """创建保护组 创建保护组。 说明: 本接口为异步接口,调用成功只是表示请求下发,创建结果需要通过“查询job状态”接口获取 :param CreateProtectionGroupRequest request :return: CreateProtectionGroupResponse """ all_params = ['create_protection_group_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='CreateProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def create_replication(self, request): """创建复制对 创建复制对,并将其添加到指定的保护组中。 :param CreateReplicationRequest request :return: CreateReplicationResponse """ return self.create_replication_with_http_info(request) def create_replication_with_http_info(self, request): """创建复制对 创建复制对,并将其添加到指定的保护组中。 :param CreateReplicationRequest request :return: CreateReplicationResponse """ all_params = ['create_replication_request_body'] local_var_params = {} for attr in request.attribute_map: 
if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='CreateReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_all_server_group_failure_jobs(self, request): """删除所有保护组失败任务 删除所有保护组层级的失败任务,创建、删除保护组失败等。 :param DeleteAllServerGroupFailureJobsRequest request :return: DeleteAllServerGroupFailureJobsResponse """ return self.delete_all_server_group_failure_jobs_with_http_info(request) def delete_all_server_group_failure_jobs_with_http_info(self, request): """删除所有保护组失败任务 删除所有保护组层级的失败任务,创建、删除保护组失败等。 :param DeleteAllServerGroupFailureJobsRequest request :return: DeleteAllServerGroupFailureJobsResponse """ all_params = [] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/task-center/failure-jobs/batch', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteAllServerGroupFailureJobsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_disaster_recovery_drill(self, request): """删除容灾演练 删除指定的容灾演练。删除后: 容灾演练服务器、容灾演练服务器上挂载的磁盘和网卡将被一并删除。 演练VPC、演练VPC的子网不会被删除。您可以继续使用该VPC创建其他云服务器。 :param DeleteDisasterRecoveryDrillRequest request :return: DeleteDisasterRecoveryDrillResponse """ return self.delete_disaster_recovery_drill_with_http_info(request) def delete_disaster_recovery_drill_with_http_info(self, request): """删除容灾演练 删除指定的容灾演练。删除后: 容灾演练服务器、容灾演练服务器上挂载的磁盘和网卡将被一并删除。 演练VPC、演练VPC的子网不会被删除。您可以继续使用该VPC创建其他云服务器。 :param DeleteDisasterRecoveryDrillRequest request :return: DeleteDisasterRecoveryDrillResponse """ all_params = ['disaster_recovery_drill_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'disaster_recovery_drill_id' in local_var_params: path_params['disaster_recovery_drill_id'] = local_var_params['disaster_recovery_drill_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( 
resource_path='/v1/{project_id}/disaster-recovery-drills/{disaster_recovery_drill_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteDisasterRecoveryDrillResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_failure_job(self, request): """删除单个失败任务 删除单个失败任务。 :param DeleteFailureJobRequest request :return: DeleteFailureJobResponse """ return self.delete_failure_job_with_http_info(request) def delete_failure_job_with_http_info(self, request): """删除单个失败任务 删除单个失败任务。 :param DeleteFailureJobRequest request :return: DeleteFailureJobResponse """ all_params = ['failure_job_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'failure_job_id' in local_var_params: path_params['failure_job_id'] = local_var_params['failure_job_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/task-center/failure-jobs/{failure_job_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteFailureJobResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_protected_instance(self, request): """删除保护实例 删除指定的保护实例。 :param DeleteProtectedInstanceRequest request :return: DeleteProtectedInstanceResponse """ return self.delete_protected_instance_with_http_info(request) def delete_protected_instance_with_http_info(self, request): """删除保护实例 删除指定的保护实例。 :param DeleteProtectedInstanceRequest request :return: DeleteProtectedInstanceResponse """ all_params = ['protected_instance_id', 'delete_protected_instance_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteProtectedInstanceResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_protected_instance_nic(self, request): """保护实例删除网卡 删除指定保护实例的指定网卡。 :param DeleteProtectedInstanceNicRequest request :return: DeleteProtectedInstanceNicResponse 
""" return self.delete_protected_instance_nic_with_http_info(request) def delete_protected_instance_nic_with_http_info(self, request): """保护实例删除网卡 删除指定保护实例的指定网卡。 :param DeleteProtectedInstanceNicRequest request :return: DeleteProtectedInstanceNicResponse """ all_params = ['protected_instance_id', 'protected_instance_delete_nic_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/nic/delete', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteProtectedInstanceNicResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_protected_instance_tag(self, request): """删除保护实例标签 幂等接口:删除时,不对标签字符集做校验,调用接口前必须要做encodeURI,服务端需要对接口URI做decodeURI。 说明:请自行选择工具执行URI编码。 删除的key不存在报404,Key不能为空或者空字符串。 :param DeleteProtectedInstanceTagRequest request :return: DeleteProtectedInstanceTagResponse """ return self.delete_protected_instance_tag_with_http_info(request) def delete_protected_instance_tag_with_http_info(self, request): """删除保护实例标签 幂等接口:删除时,不对标签字符集做校验,调用接口前必须要做encodeURI,服务端需要对接口URI做decodeURI。 说明:请自行选择工具执行URI编码。 删除的key不存在报404,Key不能为空或者空字符串。 :param DeleteProtectedInstanceTagRequest request :return: DeleteProtectedInstanceTagResponse """ all_params = ['protected_instance_id', 'key'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] if 'key' in local_var_params: path_params['key'] = local_var_params['key'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/tags/{key}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteProtectedInstanceTagResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_protection_group(self, request): """删除保护组 删除指定的保护组。 :param DeleteProtectionGroupRequest request :return: DeleteProtectionGroupResponse """ return self.delete_protection_group_with_http_info(request) def delete_protection_group_with_http_info(self, request): """删除保护组 删除指定的保护组。 :param 
DeleteProtectionGroupRequest request :return: DeleteProtectionGroupResponse """ all_params = ['server_group_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_replication(self, request): """删除复制对 删除指定的复制对。 :param DeleteReplicationRequest request :return: DeleteReplicationResponse """ return self.delete_replication_with_http_info(request) def delete_replication_with_http_info(self, request): """删除复制对 删除指定的复制对。 :param DeleteReplicationRequest request :return: DeleteReplicationResponse """ all_params = ['replication_id', 'delete_replication_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'replication_id' in local_var_params: path_params['replication_id'] = local_var_params['replication_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications/{replication_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def delete_server_group_failure_jobs(self, request): """删除指定保护组内的所有失败任务 删除指定保护组内的所有失败任务,创建保护实例失败、创建复制对失败、删除保护实例失败、删除复制对失败等。 :param DeleteServerGroupFailureJobsRequest request :return: DeleteServerGroupFailureJobsResponse """ return self.delete_server_group_failure_jobs_with_http_info(request) def delete_server_group_failure_jobs_with_http_info(self, request): """删除指定保护组内的所有失败任务 删除指定保护组内的所有失败任务,创建保护实例失败、创建复制对失败、删除保护实例失败、删除复制对失败等。 :param DeleteServerGroupFailureJobsRequest request :return: DeleteServerGroupFailureJobsResponse """ all_params = ['server_group_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): 
body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/task-center/{server_group_id}/failure-jobs/batch', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DeleteServerGroupFailureJobsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def detach_protected_instance_replication(self, request): """保护实例卸载复制对 将指定的复制对从指定的保护实例上卸载。 :param DetachProtectedInstanceReplicationRequest request :return: DetachProtectedInstanceReplicationResponse """ return self.detach_protected_instance_replication_with_http_info(request) def detach_protected_instance_replication_with_http_info(self, request): """保护实例卸载复制对 将指定的复制对从指定的保护实例上卸载。 :param DetachProtectedInstanceReplicationRequest request :return: DetachProtectedInstanceReplicationResponse """ all_params = ['protected_instance_id', 'replication_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] if 'replication_id' in local_var_params: path_params['replication_id'] = local_var_params['replication_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/detachreplication/{replication_id}', method='DELETE', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='DetachProtectedInstanceReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def expand_replication(self, request): """复制对扩容 对复制对包含的两个磁盘进行扩容操作。 :param ExpandReplicationRequest request :return: ExpandReplicationResponse """ return self.expand_replication_with_http_info(request) def expand_replication_with_http_info(self, request): """复制对扩容 对复制对包含的两个磁盘进行扩容操作。 :param ExpandReplicationRequest request :return: ExpandReplicationResponse """ all_params = ['replication_id', 'extend_replication_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'replication_id' in local_var_params: path_params['replication_id'] = local_var_params['replication_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications/{replication_id}/action', method='POST', 
path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ExpandReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_active_active_domains(self, request): """查询双活域 查询双活域。双活域由本端存储设备、远端存储设备组成,通过双活域,应用服务器可以实现跨站点的数据访问。 :param ListActiveActiveDomainsRequest request :return: ListActiveActiveDomainsResponse """ return self.list_active_active_domains_with_http_info(request) def list_active_active_domains_with_http_info(self, request): """查询双活域 查询双活域。双活域由本端存储设备、远端存储设备组成,通过双活域,应用服务器可以实现跨站点的数据访问。 :param ListActiveActiveDomainsRequest request :return: ListActiveActiveDomainsResponse """ all_params = [] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/active-domains', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListActiveActiveDomainsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_disaster_recovery_drills(self, request): """查询容灾演练列表 查询指定保护组下的所有容灾演练列表,当未指定保护组时查询当前租户下的所有容灾演练列表。 :param ListDisasterRecoveryDrillsRequest request :return: ListDisasterRecoveryDrillsResponse """ return self.list_disaster_recovery_drills_with_http_info(request) def list_disaster_recovery_drills_with_http_info(self, request): """查询容灾演练列表 查询指定保护组下的所有容灾演练列表,当未指定保护组时查询当前租户下的所有容灾演练列表。 :param ListDisasterRecoveryDrillsRequest request :return: ListDisasterRecoveryDrillsResponse """ all_params = ['server_group_id', 'name', 'status', 'drill_vpc_id', 'limit', 'offset'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'server_group_id' in local_var_params: query_params.append(('server_group_id', local_var_params['server_group_id'])) if 'name' in local_var_params: query_params.append(('name', local_var_params['name'])) if 'status' in local_var_params: query_params.append(('status', local_var_params['status'])) if 'drill_vpc_id' in local_var_params: query_params.append(('drill_vpc_id', local_var_params['drill_vpc_id'])) if 'limit' in local_var_params: query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/disaster-recovery-drills', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, 
response_type='ListDisasterRecoveryDrillsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_failure_jobs(self, request): """查询失败任务列表 查询所有保护组失败任务列表或者指定保护组下的所有失败任务列表。 :param ListFailureJobsRequest request :return: ListFailureJobsResponse """ return self.list_failure_jobs_with_http_info(request) def list_failure_jobs_with_http_info(self, request): """查询失败任务列表 查询所有保护组失败任务列表或者指定保护组下的所有失败任务列表。 :param ListFailureJobsRequest request :return: ListFailureJobsResponse """ all_params = ['failure_status', 'resource_name', 'server_group_id', 'resource_type', 'limit', 'offset'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'failure_status' in local_var_params: query_params.append(('failure_status', local_var_params['failure_status'])) if 'resource_name' in local_var_params: query_params.append(('resource_name', local_var_params['resource_name'])) if 'server_group_id' in local_var_params: query_params.append(('server_group_id', local_var_params['server_group_id'])) if 'resource_type' in local_var_params: query_params.append(('resource_type', local_var_params['resource_type'])) if 'limit' in local_var_params: query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/task-center/failure-jobs', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListFailureJobsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_protected_instance_tags(self, request): """查询保护实例标签 查询指定保护实例的标签信息。 :param ListProtectedInstanceTagsRequest request :return: ListProtectedInstanceTagsResponse """ return self.list_protected_instance_tags_with_http_info(request) def list_protected_instance_tags_with_http_info(self, request): """查询保护实例标签 查询指定保护实例的标签信息。 :param ListProtectedInstanceTagsRequest request :return: ListProtectedInstanceTagsResponse """ all_params = ['protected_instance_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/tags', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListProtectedInstanceTagsResponse', 
response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_protected_instances(self, request): """查询保护实例列表 查询当前租户下的所有保护实例列表。 :param ListProtectedInstancesRequest request :return: ListProtectedInstancesResponse """ return self.list_protected_instances_with_http_info(request) def list_protected_instances_with_http_info(self, request): """查询保护实例列表 查询当前租户下的所有保护实例列表。 :param ListProtectedInstancesRequest request :return: ListProtectedInstancesResponse """ all_params = ['server_group_id', 'server_group_ids', 'protected_instance_ids', 'limit', 'offset', 'status', 'name', 'query_type', 'availability_zone'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'server_group_id' in local_var_params: query_params.append(('server_group_id', local_var_params['server_group_id'])) if 'server_group_ids' in local_var_params: query_params.append(('server_group_ids', local_var_params['server_group_ids'])) if 'protected_instance_ids' in local_var_params: query_params.append(('protected_instance_ids', local_var_params['protected_instance_ids'])) if 'limit' in local_var_params: query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) if 'status' in local_var_params: query_params.append(('status', local_var_params['status'])) if 'name' in local_var_params: query_params.append(('name', local_var_params['name'])) if 'query_type' in local_var_params: query_params.append(('query_type', local_var_params['query_type'])) if 'availability_zone' in local_var_params: query_params.append(('availability_zone', local_var_params['availability_zone'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListProtectedInstancesResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_protected_instances_by_tags(self, request): """通过标签查询保护实例 使用标签过滤保护实例 :param ListProtectedInstancesByTagsRequest request :return: ListProtectedInstancesByTagsResponse """ return self.list_protected_instances_by_tags_with_http_info(request) def list_protected_instances_by_tags_with_http_info(self, request): """通过标签查询保护实例 使用标签过滤保护实例 :param ListProtectedInstancesByTagsRequest request :return: ListProtectedInstancesByTagsResponse """ all_params = ['list_protected_instances_by_tags_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( 
['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/resource_instances/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListProtectedInstancesByTagsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_protected_instances_project_tags(self, request): """查询保护实例项目标签 查询租户在指定Project中保护实例的所有资源标签集合。 :param ListProtectedInstancesProjectTagsRequest request :return: ListProtectedInstancesProjectTagsResponse """ return self.list_protected_instances_project_tags_with_http_info(request) def list_protected_instances_project_tags_with_http_info(self, request): """查询保护实例项目标签 查询租户在指定Project中保护实例的所有资源标签集合。 :param ListProtectedInstancesProjectTagsRequest request :return: ListProtectedInstancesProjectTagsResponse """ all_params = [] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/tags', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListProtectedInstancesProjectTagsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_protection_groups(self, request): """查询保护组列表 查询当前租户所有的保护组列表。 :param ListProtectionGroupsRequest request :return: ListProtectionGroupsResponse """ return self.list_protection_groups_with_http_info(request) def list_protection_groups_with_http_info(self, request): """查询保护组列表 查询当前租户所有的保护组列表。 :param ListProtectionGroupsRequest request :return: ListProtectionGroupsResponse """ all_params = ['limit', 'offset', 'status', 'name', 'query_type', 'availability_zone'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'limit' in local_var_params: query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) if 'status' in local_var_params: query_params.append(('status', local_var_params['status'])) if 'name' in local_var_params: query_params.append(('name', local_var_params['name'])) if 'query_type' in local_var_params: query_params.append(('query_type', local_var_params['query_type'])) if 'availability_zone' in local_var_params: query_params.append(('availability_zone', local_var_params['availability_zone'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups', method='GET', path_params=path_params, 
query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListProtectionGroupsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_replications(self, request): """查询复制对列表 查询指定保护组下的所有复制对列表,如果不给定指定保护组则查询当前租户下的所有复制对列表。 :param ListReplicationsRequest request :return: ListReplicationsResponse """ return self.list_replications_with_http_info(request) def list_replications_with_http_info(self, request): """查询复制对列表 查询指定保护组下的所有复制对列表,如果不给定指定保护组则查询当前租户下的所有复制对列表。 :param ListReplicationsRequest request :return: ListReplicationsResponse """ all_params = ['server_group_id', 'server_group_ids', 'protected_instance_id', 'protected_instance_ids', 'name', 'status', 'limit', 'offset', 'query_type', 'availability_zone'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'server_group_id' in local_var_params: query_params.append(('server_group_id', local_var_params['server_group_id'])) if 'server_group_ids' in local_var_params: query_params.append(('server_group_ids', local_var_params['server_group_ids'])) if 'protected_instance_id' in local_var_params: query_params.append(('protected_instance_id', local_var_params['protected_instance_id'])) if 'protected_instance_ids' in local_var_params: query_params.append(('protected_instance_ids', local_var_params['protected_instance_ids'])) if 'name' in local_var_params: query_params.append(('name', local_var_params['name'])) if 'status' in local_var_params: query_params.append(('status', local_var_params['status'])) if 'limit' in local_var_params: query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) if 'query_type' in local_var_params: query_params.append(('query_type', local_var_params['query_type'])) if 'availability_zone' in local_var_params: query_params.append(('availability_zone', local_var_params['availability_zone'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListReplicationsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_rpo_statistics(self, request): """查询资源的RPO超标趋势记录列表 查询当前租户大屏显示中,资源的RPO超标趋势记录列表。 :param ListRpoStatisticsRequest request :return: ListRpoStatisticsResponse """ return self.list_rpo_statistics_with_http_info(request) def list_rpo_statistics_with_http_info(self, request): """查询资源的RPO超标趋势记录列表 查询当前租户大屏显示中,资源的RPO超标趋势记录列表。 :param ListRpoStatisticsRequest request :return: ListRpoStatisticsResponse """ all_params = ['limit', 'offset', 'start_time', 'end_time', 'resource_type'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] if 'limit' in local_var_params: 
query_params.append(('limit', local_var_params['limit'])) if 'offset' in local_var_params: query_params.append(('offset', local_var_params['offset'])) if 'start_time' in local_var_params: query_params.append(('start_time', local_var_params['start_time'])) if 'end_time' in local_var_params: query_params.append(('end_time', local_var_params['end_time'])) if 'resource_type' in local_var_params: query_params.append(('resource_type', local_var_params['resource_type'])) header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/resource/rpo-statistics', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListRpoStatisticsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def resize_protected_instance(self, request): """保护实例变更规格 变更指定保护实例中弹性云服务器的规格,包括:同时变更生产站点云服务器和容灾站点云服务器的规格。 仅变更生产站点云服务器的规格,容灾站点云服务器规格不变。 生产站点云服务器规格不变,仅变更容灾站点云服务器的规格。 当且仅当待变更规格的云服务器处于关机状态时,才能执行此操作。 说明:不同规格的云服务器在性能上存在差异,可能会对云服务器上运行的应用产生影响。 为保证切换/故障切换后云服务器的性能,建议容灾站点服务器的规格(CPU、内存)不低于生产站点云服务器的规格(CPU、内存)。 :param ResizeProtectedInstanceRequest request :return: ResizeProtectedInstanceResponse """ return self.resize_protected_instance_with_http_info(request) def resize_protected_instance_with_http_info(self, request): """保护实例变更规格 变更指定保护实例中弹性云服务器的规格,包括:同时变更生产站点云服务器和容灾站点云服务器的规格。 仅变更生产站点云服务器的规格,容灾站点云服务器规格不变。 生产站点云服务器规格不变,仅变更容灾站点云服务器的规格。 当且仅当待变更规格的云服务器处于关机状态时,才能执行此操作。 说明:不同规格的云服务器在性能上存在差异,可能会对云服务器上运行的应用产生影响。 为保证切换/故障切换后云服务器的性能,建议容灾站点服务器的规格(CPU、内存)不低于生产站点云服务器的规格(CPU、内存)。 :param ResizeProtectedInstanceRequest request :return: ResizeProtectedInstanceResponse """ all_params = ['protected_instance_id', 'resize_protected_instance_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}/resize', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ResizeProtectedInstanceResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_disaster_recovery_drill(self, request): """查询单个容灾演练详情 查询单个容灾演练的详细信息。 :param ShowDisasterRecoveryDrillRequest request :return: ShowDisasterRecoveryDrillResponse """ return self.show_disaster_recovery_drill_with_http_info(request) def show_disaster_recovery_drill_with_http_info(self, request): """查询单个容灾演练详情 查询单个容灾演练的详细信息。 :param 
ShowDisasterRecoveryDrillRequest request :return: ShowDisasterRecoveryDrillResponse """ all_params = ['disaster_recovery_drill_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'disaster_recovery_drill_id' in local_var_params: path_params['disaster_recovery_drill_id'] = local_var_params['disaster_recovery_drill_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/disaster-recovery-drills/{disaster_recovery_drill_id}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowDisasterRecoveryDrillResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_protected_instance(self, request): """查询单个保护实例详情 查询单个保护实例的详细信息,如名称、ID等。 :param ShowProtectedInstanceRequest request :return: ShowProtectedInstanceResponse """ return self.show_protected_instance_with_http_info(request) def show_protected_instance_with_http_info(self, request): """查询单个保护实例详情 查询单个保护实例的详细信息,如名称、ID等。 :param ShowProtectedInstanceRequest request :return: ShowProtectedInstanceResponse """ all_params = ['protected_instance_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowProtectedInstanceResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_protection_group(self, request): """查询保护组详情 查询单个保护组的详细信息,如ID、名称等。 :param ShowProtectionGroupRequest request :return: ShowProtectionGroupResponse """ return self.show_protection_group_with_http_info(request) def show_protection_group_with_http_info(self, request): """查询保护组详情 查询单个保护组的详细信息,如ID、名称等。 :param ShowProtectionGroupRequest request :return: ShowProtectionGroupResponse """ all_params = ['server_group_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] 
header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_quota(self, request): """查询租户配额 查询资源的配额相关信息。 :param ShowQuotaRequest request :return: ShowQuotaResponse """ return self.show_quota_with_http_info(request) def show_quota_with_http_info(self, request): """查询租户配额 查询资源的配额相关信息。 :param ShowQuotaRequest request :return: ShowQuotaResponse """ all_params = [] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/sdrs/quotas', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowQuotaResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_replication(self, request): """查询单个复制对详情 查询单个复制对的详细信息。 :param ShowReplicationRequest request :return: ShowReplicationResponse """ return self.show_replication_with_http_info(request) def show_replication_with_http_info(self, request): """查询单个复制对详情 查询单个复制对的详细信息。 :param ShowReplicationRequest request :return: ShowReplicationResponse """ all_params = ['replication_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'replication_id' in local_var_params: path_params['replication_id'] = local_var_params['replication_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications/{replication_id}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowReplicationResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def start_failover_protection_group(self, request): """保护组故障切换 当保护组的生产站点发生故障时,将保护组的生产站点切到当前的容灾站点,即另一端AZ,启用当前容灾站点的云硬盘以及云服务器等资源。 故障切换完成之后,保护组的当前生产站点变成故障切换发生之前的容灾站点,且生产站点和容灾站点之间的数据已停止保护,必须调用5.4.6-保护组开启保护/重保护接口成功后,两端的数据才会重新被保护。 :param StartFailoverProtectionGroupRequest request :return: StartFailoverProtectionGroupResponse """ return self.start_failover_protection_group_with_http_info(request) def start_failover_protection_group_with_http_info(self, request): """保护组故障切换 
当保护组的生产站点发生故障时,将保护组的生产站点切到当前的容灾站点,即另一端AZ,启用当前容灾站点的云硬盘以及云服务器等资源。 故障切换完成之后,保护组的当前生产站点变成故障切换发生之前的容灾站点,且生产站点和容灾站点之间的数据已停止保护,必须调用5.4.6-保护组开启保护/重保护接口成功后,两端的数据才会重新被保护。 :param StartFailoverProtectionGroupRequest request :return: StartFailoverProtectionGroupResponse """ all_params = ['server_group_id', 'failover_protection_group_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='StartFailoverProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def start_protection_group(self, request): """保护组开启保护/重保护 对某一个保护组的“开启保护”或“重保护”操作。 :param StartProtectionGroupRequest request :return: StartProtectionGroupResponse """ return self.start_protection_group_with_http_info(request) def start_protection_group_with_http_info(self, request): """保护组开启保护/重保护 对某一个保护组的“开启保护”或“重保护”操作。 :param StartProtectionGroupRequest request :return: StartProtectionGroupResponse """ all_params = ['server_group_id', 'start_protection_group_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='StartProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def start_reverse_protection_group(self, request): """保护组切换 对保护组进行切换操作,可以将保护组的当前生产站点,从创建保护组时指定的生产站点切换到创建保护组时指定的容灾站点,也可以从创建保护组时指定的容灾站点切换到创建保护组时指定的生产站点。切换后,生产站点和容灾站点的数据仍然处于被保护状态,只是复制方向与操作之前相反。 :param StartReverseProtectionGroupRequest request :return: StartReverseProtectionGroupResponse """ return self.start_reverse_protection_group_with_http_info(request) def start_reverse_protection_group_with_http_info(self, request): """保护组切换 对保护组进行切换操作,可以将保护组的当前生产站点,从创建保护组时指定的生产站点切换到创建保护组时指定的容灾站点,也可以从创建保护组时指定的容灾站点切换到创建保护组时指定的生产站点。切换后,生产站点和容灾站点的数据仍然处于被保护状态,只是复制方向与操作之前相反。 
:param StartReverseProtectionGroupRequest request :return: StartReverseProtectionGroupResponse """ all_params = ['server_group_id', 'reverse_protection_group_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='StartReverseProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def stop_protection_group(self, request): """保护组停止保护 对某一个保护组的停止保护操作。 :param StopProtectionGroupRequest request :return: StopProtectionGroupResponse """ return self.stop_protection_group_with_http_info(request) def stop_protection_group_with_http_info(self, request): """保护组停止保护 对某一个保护组的停止保护操作。 :param StopProtectionGroupRequest request :return: StopProtectionGroupResponse """ all_params = ['server_group_id', 'stop_protection_group_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}/action', method='POST', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='StopProtectionGroupResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def update_disaster_recovery_drill_name(self, request): """更新容灾演练名称 更新容灾演练的名称。 :param UpdateDisasterRecoveryDrillNameRequest request :return: UpdateDisasterRecoveryDrillNameResponse """ return self.update_disaster_recovery_drill_name_with_http_info(request) def update_disaster_recovery_drill_name_with_http_info(self, request): """更新容灾演练名称 更新容灾演练的名称。 :param UpdateDisasterRecoveryDrillNameRequest request :return: UpdateDisasterRecoveryDrillNameResponse """ all_params = ['disaster_recovery_drill_id', 'update_disaster_recovery_drill_name_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'disaster_recovery_drill_id' in 
local_var_params: path_params['disaster_recovery_drill_id'] = local_var_params['disaster_recovery_drill_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/disaster-recovery-drills/{disaster_recovery_drill_id}', method='PUT', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='UpdateDisasterRecoveryDrillNameResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def update_protected_instance_name(self, request): """更新保护实例名称 更新某一个保护实例的名称。 :param UpdateProtectedInstanceNameRequest request :return: UpdateProtectedInstanceNameResponse """ return self.update_protected_instance_name_with_http_info(request) def update_protected_instance_name_with_http_info(self, request): """更新保护实例名称 更新某一个保护实例的名称。 :param UpdateProtectedInstanceNameRequest request :return: UpdateProtectedInstanceNameResponse """ all_params = ['protected_instance_id', 'update_protected_instance_name_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'protected_instance_id' in local_var_params: path_params['protected_instance_id'] = local_var_params['protected_instance_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/protected-instances/{protected_instance_id}', method='PUT', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='UpdateProtectedInstanceNameResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def update_protection_group_name(self, request): """更新保护组名称 更新某一个保护组的名称。 :param UpdateProtectionGroupNameRequest request :return: UpdateProtectionGroupNameResponse """ return self.update_protection_group_name_with_http_info(request) def update_protection_group_name_with_http_info(self, request): """更新保护组名称 更新某一个保护组的名称。 :param UpdateProtectionGroupNameRequest request :return: UpdateProtectionGroupNameResponse """ all_params = ['server_group_id', 'update_protection_group_name_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'server_group_id' in local_var_params: path_params['server_group_id'] = local_var_params['server_group_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = 
request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/server-groups/{server_group_id}', method='PUT', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='UpdateProtectionGroupNameResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def update_replication_name(self, request): """更新复制对名称 更新复制对名称。 :param UpdateReplicationNameRequest request :return: UpdateReplicationNameResponse """ return self.update_replication_name_with_http_info(request) def update_replication_name_with_http_info(self, request): """更新复制对名称 更新复制对名称。 :param UpdateReplicationNameRequest request :return: UpdateReplicationNameResponse """ all_params = ['replication_id', 'update_replication_name_request_body'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'replication_id' in local_var_params: path_params['replication_id'] = local_var_params['replication_id'] query_params = [] header_params = {} form_params = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json;charset=UTF-8']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/replications/{replication_id}', method='PUT', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='UpdateReplicationNameResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def list_api_versions(self, request): """查询API版本信息 查询存储容灾当前所有可用的版本信息列表。 :param ListApiVersionsRequest request :return: ListApiVersionsResponse """ return self.list_api_versions_with_http_info(request) def list_api_versions_with_http_info(self, request): """查询API版本信息 查询存储容灾当前所有可用的版本信息列表。 :param ListApiVersionsRequest request :return: ListApiVersionsResponse """ all_params = [] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ListApiVersionsResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_specified_api_version(self, request): """查询指定API版本信息 查询存储容灾指定API版本信息。 :param ShowSpecifiedApiVersionRequest request :return: ShowSpecifiedApiVersionResponse """ return self.show_specified_api_version_with_http_info(request) def 
show_specified_api_version_with_http_info(self, request): """查询指定API版本信息 查询存储容灾指定API版本信息。 :param ShowSpecifiedApiVersionRequest request :return: ShowSpecifiedApiVersionResponse """ all_params = ['api_version'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'api_version' in local_var_params: path_params['api_version'] = local_var_params['api_version'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/{api_version}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowSpecifiedApiVersionResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def show_job_status(self, request): """查询job状态 查询job的执行状态。 对于创建保护组、删除保护组、创建保护实例、删除保护实例、创建复制对、删除复制对等异步API,命令下发后,会返回job_id,通过job_id可以查询任务的执行状态。 :param ShowJobStatusRequest request :return: ShowJobStatusResponse """ return self.show_job_status_with_http_info(request) def show_job_status_with_http_info(self, request): """查询job状态 查询job的执行状态。 对于创建保护组、删除保护组、创建保护实例、删除保护实例、创建复制对、删除复制对等异步API,命令下发后,会返回job_id,通过job_id可以查询任务的执行状态。 :param ShowJobStatusRequest request :return: ShowJobStatusResponse """ all_params = ['job_id'] local_var_params = {} for attr in request.attribute_map: if hasattr(request, attr): local_var_params[attr] = getattr(request, attr) collection_formats = {} path_params = {} if 'job_id' in local_var_params: path_params['job_id'] = local_var_params['job_id'] query_params = [] header_params = {} form_params = {} body_params = None if isinstance(request, SdkStreamRequest): body_params = request.get_file_stream() response_headers = [] header_params['Content-Type'] = http_utils.select_header_content_type( ['application/json']) auth_settings = [] return self.call_api( resource_path='/v1/{project_id}/jobs/{job_id}', method='GET', path_params=path_params, query_params=query_params, header_params=header_params, body=body_params, post_params=form_params, response_type='ShowJobStatusResponse', response_headers=response_headers, auth_settings=auth_settings, collection_formats=collection_formats, request_type=request.__class__.__name__) def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, response_type=None, response_headers=None, auth_settings=None, collection_formats=None, request_type=None): """Makes the HTTP request and returns deserialized data. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response_type: Response data type. :param response_headers: Header should be added to response data. 
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param request_type: Request data type.
        :return: Return the response directly.
        """
        return self.do_http_request(
            method=method,
            resource_path=resource_path,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body,
            post_params=post_params,
            response_type=response_type,
            response_headers=response_headers,
            collection_formats=collection_formats,
            request_type=request_type)
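The client code above is one flattened blob, but its shape is uniform: every public method delegates to a *_with_http_info twin that copies request attributes into path/query/body parameters and hands them to call_api, and the four protection-group actions (failover, start, reverse, stop) all POST to the same /v1/{project_id}/server-groups/{server_group_id}/action endpoint, differing only in request body. A minimal usage sketch follows; the package path, client class name, builder, and credential type are assumptions based on the usual huaweicloud-sdk layout, not taken from this file — only the method name and its request/response types appear in the source.

# Hypothetical usage sketch -- the import paths, SdrsClient, new_builder(),
# and BasicCredentials are assumed from the common huaweicloud-sdk layout;
# only list_protection_groups and ListProtectionGroupsRequest come from the
# flattened source above.
from huaweicloudsdkcore.auth.credentials import BasicCredentials  # assumed
from huaweicloudsdksdrs.v1 import SdrsClient, ListProtectionGroupsRequest  # assumed

credentials = BasicCredentials("<ak>", "<sk>", "<project-id>")
client = (
    SdrsClient.new_builder()
    .with_credentials(credentials)
    .with_endpoint("https://sdrs.<region>.myhuaweicloud.com")  # assumed endpoint form
    .build()
)

# Attributes set on the request become the limit/offset query parameters
# built up in list_protection_groups_with_http_info above.
request = ListProtectionGroupsRequest(limit=20, offset=0)
response = client.list_protection_groups(request)
print(response)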
32.107208
249
0.645516
10,178
105,119
6.251916
0.048733
0.037465
0.065565
0.025145
0.922052
0.906965
0.890212
0.864958
0.839609
0.745835
0
0.001294
0.271997
105,119
3,273
250
32.117018
0.830204
0.14007
0
0.832082
0
0
0.129784
0.085047
0
0
0
0
0
1
0.054185
false
0
0.005365
0
0.115343
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
dc4d56f6420be573d61e425ecf08b5918474bb3f
7,043
py
Python
tests/test_human_auth_sample.py
chyroc/pylark
a54cce6b814935fd3c72668b262b54c8ee461484
[ "Apache-2.0" ]
7
2021-08-18T00:42:05.000Z
2022-03-14T09:49:15.000Z
tests/test_human_auth_sample.py
chyroc/pylark
a54cce6b814935fd3c72668b262b54c8ee461484
[ "Apache-2.0" ]
null
null
null
tests/test_human_auth_sample.py
chyroc/pylark
a54cce6b814935fd3c72668b262b54c8ee461484
[ "Apache-2.0" ]
1
2022-03-14T09:49:20.000Z
2022-03-14T09:49:20.000Z
# Code generated by lark_sdk_gen. DO NOT EDIT.

import unittest

import pylark
import pytest

from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed


def mock(*args, **kwargs):
    raise pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")


def mock_raw_request(*args, **kwargs):
    raise pylark.PyLarkError(
        scope="scope", func="func", code=1, msg="mock-raw-request-failed"
    )


# mock get token
class TestHumanAuthSampleMockGetTokenFailed(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestHumanAuthSampleMockGetTokenFailed, self).__init__(*args, **kwargs)

        self.cli = app_all_permission.ins()
        self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
        self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
        self.module_cli = self.cli.human_auth

    def test_mock_get_token_get_face_verify_auth_result(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_face_verify_auth_result(
                pylark.GetFaceVerifyAuthResultReq()
            )
        assert "msg=failed" in f"{e}"

    def test_mock_get_token_upload_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.upload_face_verify_image(pylark.UploadFaceVerifyImageReq())
        assert "msg=failed" in f"{e}"

    def test_mock_get_token_crop_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.crop_face_verify_image(pylark.CropFaceVerifyImageReq())
        assert "msg=failed" in f"{e}"

    def test_mock_get_token_create_identity(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.create_identity(pylark.CreateIdentityReq())
        assert "msg=failed" in f"{e}"


# mock mock self func
class TestHumanAuthSampleMockSelfFuncFailed(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestHumanAuthSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)

        self.cli = app_all_permission.ins()
        self.module_cli = self.cli.human_auth

    def test_mock_self_func_get_face_verify_auth_result(self):
        origin_func = self.module_cli.get_face_verify_auth_result
        self.module_cli.get_face_verify_auth_result = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_face_verify_auth_result(
                pylark.GetFaceVerifyAuthResultReq()
            )
        assert "msg=mock-failed" in f"{e}"
        self.module_cli.get_face_verify_auth_result = origin_func

    def test_mock_self_func_upload_face_verify_image(self):
        origin_func = self.module_cli.upload_face_verify_image
        self.module_cli.upload_face_verify_image = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.upload_face_verify_image(pylark.UploadFaceVerifyImageReq())
        assert "msg=mock-failed" in f"{e}"
        self.module_cli.upload_face_verify_image = origin_func

    def test_mock_self_func_crop_face_verify_image(self):
        origin_func = self.module_cli.crop_face_verify_image
        self.module_cli.crop_face_verify_image = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.crop_face_verify_image(pylark.CropFaceVerifyImageReq())
        assert "msg=mock-failed" in f"{e}"
        self.module_cli.crop_face_verify_image = origin_func

    def test_mock_self_func_create_identity(self):
        origin_func = self.module_cli.create_identity
        self.module_cli.create_identity = mock
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.create_identity(pylark.CreateIdentityReq())
        assert "msg=mock-failed" in f"{e}"
        self.module_cli.create_identity = origin_func


# mock raw request
class TestHumanAuthSampleMockRawRequestFailed(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestHumanAuthSampleMockRawRequestFailed, self).__init__(*args, **kwargs)

        self.cli = app_all_permission.ins()
        self.module_cli = self.cli.human_auth
        self.cli.raw_request = mock_raw_request

    def test_mock_raw_request_get_face_verify_auth_result(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_face_verify_auth_result(
                pylark.GetFaceVerifyAuthResultReq()
            )
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg

    def test_mock_raw_request_upload_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.upload_face_verify_image(pylark.UploadFaceVerifyImageReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg

    def test_mock_raw_request_crop_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.crop_face_verify_image(pylark.CropFaceVerifyImageReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg

    def test_mock_raw_request_create_identity(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.create_identity(pylark.CreateIdentityReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
        assert "mock-raw-request-failed" in e.value.msg


# real request
class TestHumanAuthSampleRealRequestFailed(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestHumanAuthSampleRealRequestFailed, self).__init__(*args, **kwargs)

        self.cli = app_no_permission.ins()
        self.module_cli = self.cli.human_auth

    def test_real_request_get_face_verify_auth_result(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.get_face_verify_auth_result(
                pylark.GetFaceVerifyAuthResultReq()
            )
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0

    def test_real_request_upload_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.upload_face_verify_image(pylark.UploadFaceVerifyImageReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0

    def test_real_request_crop_face_verify_image(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.crop_face_verify_image(pylark.CropFaceVerifyImageReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0

    def test_real_request_create_identity(self):
        with pytest.raises(pylark.PyLarkError) as e:
            self.module_cli.create_identity(pylark.CreateIdentityReq())
        assert e.type is pylark.PyLarkError
        assert e.value.code > 0
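The generated tests above exercise one failure mode per class — a broken token fetch, a monkeypatched module method, a raising raw_request, and a real call without permission — all asserted through pytest.raises. A condensed, self-contained sketch of that failure-injection pattern, using a toy client instead of pylark (every name here is illustrative, not part of the generated file):

# Illustrative sketch of the injection pattern: swap a bound method for a
# raising stand-in, then assert on the captured exception's code and message.
import pytest


class ToyError(Exception):
    def __init__(self, code, msg):
        super().__init__(msg)
        self.code, self.msg = code, msg


class ToyClient:
    def get_result(self):
        return "ok"


def failing(*args, **kwargs):
    raise ToyError(code=1, msg="mock-failed")


def test_injected_failure():
    cli = ToyClient()
    cli.get_result = failing  # swap the method, as the mock-self-func tests do
    with pytest.raises(ToyError) as e:
        cli.get_result()
    assert e.value.code > 0  # same assertion shape as the generated tests
    assert "mock-failed" in e.value.msg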
38.07027
87
0.714042
911
7,043
5.192097
0.08562
0.069767
0.087949
0.059197
0.860888
0.843763
0.818816
0.790698
0.737844
0.69704
0
0.001777
0.200909
7,043
184
88
38.277174
0.838664
0.015476
0
0.576923
1
0
0.038684
0.016599
0
0
0
0
0.215385
1
0.169231
false
0
0.038462
0
0.238462
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
dc63a64fb2d7b1e90072a284bf5be574a0ef5c4d
160
py
Python
tools/experiments_mlflow/arguments.py
jmquintana79/utilsDS
1693810b6f10024542b30fdfedbfcd0518f32945
[ "MIT" ]
null
null
null
tools/experiments_mlflow/arguments.py
jmquintana79/utilsDS
1693810b6f10024542b30fdfedbfcd0518f32945
[ "MIT" ]
null
null
null
tools/experiments_mlflow/arguments.py
jmquintana79/utilsDS
1693810b6f10024542b30fdfedbfcd0518f32945
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# @Author: Juan Quintana
# @Date: 2018-09-03 16:01:48
# @Last Modified by: Juan Quintana
# @Last Modified time: 2018-09-03 16:02:05
26.666667
42
0.6375
27
160
3.777778
0.703704
0.235294
0.156863
0.196078
0
0
0
0
0
0
0
0.221374
0.18125
160
5
43
32
0.557252
0.93125
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
52434ea1a37b2021bcf413f0e98e641a8b18cb9e
3,475
py
Python
Tests/Test_Pillow_Resizing.py
brucewxh/IntraArchiveDeduplicator
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
[ "BSD-3-Clause" ]
86
2015-01-13T15:02:08.000Z
2021-12-24T02:13:03.000Z
Tests/Test_Pillow_Resizing.py
brucewxh/IntraArchiveDeduplicator
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
[ "BSD-3-Clause" ]
4
2016-11-18T20:08:50.000Z
2018-03-08T23:05:37.000Z
Tests/Test_Pillow_Resizing.py
brucewxh/IntraArchiveDeduplicator
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
[ "BSD-3-Clause" ]
12
2015-05-03T07:56:50.000Z
2021-03-11T12:38:56.000Z
import unittest

from PIL import Image

pre_scale_bw = [
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 76, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 149, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 105, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 225, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 105, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 149, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 76, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 76, 255, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 149, 255, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 105, 255, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 225, 255],
    [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0],
]

simg_expect = [
    0, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 149, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 0, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 0, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 149, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 0, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 0, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 149, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 0, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 0,
]


class TestSequenceFunctions(unittest.TestCase):

    def test_image_rescale_1(self):
        img_dims = 20
        simg_dims = 10

        img = Image.new('L', (img_dims, img_dims))
        pix = img.load()
        for y_idx, pxlist in enumerate(pre_scale_bw):
            for x_idx, pixval in enumerate(pxlist):
                pix[y_idx, x_idx] = pixval

        img_l = list(img.getdata())

        # getdata results in a flat list, so flatten the input
        flat_l = [tmp for row in pre_scale_bw for tmp in row]

        # Check the image loaded OK
        self.assertEqual(img_l, flat_l)

        # Do the resize
        simg = img.resize((simg_dims, simg_dims), Image.NEAREST)

        # Check the resize resulted in the expected output
        simg_pix = list(simg.getdata())
        self.assertEqual(simg_pix, simg_expect)


if __name__ == '__main__':
    unittest.main()
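simg_expect encodes Pillow's NEAREST sampling rule for an exact 2x downscale: the expected values are consistent with destination pixel (x, y) taking the source pixel at (2x + 1, 2y + 1), which is why only every other diagonal mark survives (0, 149, 0, 0, 149, ...). A standalone check of that rule, assuming only Pillow:

# Standalone check of the sampling rule implied by simg_expect: on a 2x
# NEAREST downscale, destination (x, y) reads source (2*x + 1, 2*y + 1).
from PIL import Image

src = Image.new('L', (20, 20), 255)
src.putpixel((5, 5), 0)        # mark one source pixel on the diagonal
dst = src.resize((10, 10), Image.NEAREST)
print(dst.getpixel((2, 2)))    # 0 -> source (2*2+1, 2*2+1) == (5, 5) was sampled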
45.12987
103
0.595396
635
3,475
3.20315
0.100787
1.312684
1.849558
2.306785
0.722222
0.722222
0.722222
0.722222
0.722222
0.722222
0
0.547142
0.224748
3,475
76
104
45.723684
0.207869
0.040576
0
0.037736
0
0
0.002704
0
0
0
0
0
0.037736
1
0.018868
false
0
0.037736
0
0.075472
0
0
0
0
null
1
1
1
0
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
bfdd4a9d21c399dee2a7c9a34ad92fe67a5abb33
114
py
Python
data/__init__.py
x-zho14/STR
ecf2a7977258313d7b7401b7f29d547abeb4ef7e
[ "Apache-2.0" ]
null
null
null
data/__init__.py
x-zho14/STR
ecf2a7977258313d7b7401b7f29d547abeb4ef7e
[ "Apache-2.0" ]
null
null
null
data/__init__.py
x-zho14/STR
ecf2a7977258313d7b7401b7f29d547abeb4ef7e
[ "Apache-2.0" ]
null
null
null
from data.imagenet import ImageNet
from data.imagenet import TinyImageNet
from data.cifar import CIFAR10, CIFAR100
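These three imports exist purely to re-export the dataset classes at the package root; a one-line usage note (the package layout is taken from the imports themselves):

# With the re-exports in data/__init__.py, callers can skip the submodules:
from data import CIFAR10, ImageNet  # equivalent to the submodule imports above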
38
40
0.859649
16
114
6.125
0.5
0.244898
0.326531
0.44898
0
0
0
0
0
0
0
0.04902
0.105263
114
3
40
38
0.911765
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
8741ec8972154f30195564f9251a7b8a30faf92c
20,613
py
Python
Wrappers/Python/test/test_Gradient.py
paskino/CIL
1803cbd445c408588fecbf705fb8b4df486029fc
[ "Apache-2.0" ]
null
null
null
Wrappers/Python/test/test_Gradient.py
paskino/CIL
1803cbd445c408588fecbf705fb8b4df486029fc
[ "Apache-2.0" ]
null
null
null
Wrappers/Python/test/test_Gradient.py
paskino/CIL
1803cbd445c408588fecbf705fb8b4df486029fc
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL). # Copyright 2017 UKRI-STFC # Copyright 2017 University of Manchester # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import unittest import numpy from cil.framework import ImageGeometry, AcquisitionGeometry from cil.framework import ImageData, AcquisitionData from cil.framework import BlockDataContainer import functools from cil.optimisation.operators import GradientOperator, BlockOperator from cil.optimisation.operators import LinearOperator class TestGradientOperator(unittest.TestCase): def test_GradientOperator(self): N, M, K = 20, 30, 40 channels = 10 numpy.random.seed(1) # check range geometry, examples ig1 = ImageGeometry(voxel_num_x = M, voxel_num_y = N) ig3 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels) ig4 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels, voxel_num_z= K) G1 = GradientOperator(ig1, correlation = 'Space', backend='numpy') print(G1.range_geometry().shape, '2D no channels') G4 = GradientOperator(ig3, correlation = 'SpaceChannels', backend='numpy') print(G4.range_geometry().shape, '2D with channels corr') G5 = GradientOperator(ig3, correlation = 'Space', backend='numpy') print(G5.range_geometry().shape, '2D with channels no corr') G6 = GradientOperator(ig4, correlation = 'Space', backend='numpy') print(G6.range_geometry().shape, '3D with channels no corr') G7 = GradientOperator(ig4, correlation = 'SpaceChannels', backend='numpy') print(G7.range_geometry().shape, '3D with channels with corr') u = ig1.allocate(ImageGeometry.RANDOM) w = G1.range_geometry().allocate(ImageGeometry.RANDOM) LHS = (G1.direct(u)*w).sum() RHS = (u * G1.adjoint(w)).sum() numpy.testing.assert_approx_equal(LHS, RHS, significant = 1) numpy.testing.assert_approx_equal(G1.norm(), numpy.sqrt(2*4), significant = 1) u1 = ig3.allocate('random') w1 = G4.range_geometry().allocate('random') LHS1 = (G4.direct(u1) * w1).sum() RHS1 = (u1 * G4.adjoint(w1)).sum() numpy.testing.assert_approx_equal(LHS1, RHS1, significant=1) numpy.testing.assert_almost_equal(G4.norm(), numpy.sqrt(3*4), decimal = 0) u2 = ig4.allocate('random') w2 = G7.range_geometry().allocate('random') LHS2 = (G7.direct(u2) * w2).sum() RHS2 = (u2 * G7.adjoint(w2)).sum() numpy.testing.assert_approx_equal(LHS2, RHS2, significant = 3) numpy.testing.assert_approx_equal(G7.norm(), numpy.sqrt(3*4), significant = 1) #check direct/adjoint for space/channels correlation ig_channel = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3, channels = 2) G_no_channel = GradientOperator(ig_channel, correlation = 'Space', backend='numpy') G_channel = GradientOperator(ig_channel, correlation = 'SpaceChannels', backend='numpy') u3 = ig_channel.allocate('random_int') res_no_channel = G_no_channel.direct(u3) res_channel = G_channel.direct(u3) print(" Derivative for 3 directions, first is wrt Channel direction\n") print(res_channel[0].as_array()) print(res_channel[1].as_array()) print(res_channel[2].as_array()) print(" Derivative for 
2 directions, no Channel direction\n") print(res_no_channel[0].as_array()) print(res_no_channel[1].as_array()) ig2D = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3) u4 = ig2D.allocate('random_int') G2D = GradientOperator(ig2D, backend='numpy') res = G2D.direct(u4) print(res[0].as_array()) print(res[1].as_array()) M, N = 20, 30 ig = ImageGeometry(M, N) # check direct of GradientOperator and sparse matrix G = GradientOperator(ig, backend='numpy') norm1 = G.norm(iterations=300) print ("should be sqrt(8) {} {}".format(numpy.sqrt(8), norm1)) numpy.testing.assert_almost_equal(norm1, numpy.sqrt(8), decimal=1) ig4 = ImageGeometry(M,N, channels=3) G4 = GradientOperator(ig4, correlation="SpaceChannels", backend='numpy') norm4 = G4.norm(iterations=300) print("should be sqrt(12) {} {}".format(numpy.sqrt(12), norm4)) self.assertTrue((norm4 - numpy.sqrt(12))/norm4 < 0.2) def test_GradientOperator_4D(self): nc, nz, ny, nx = 3, 4, 5, 6 size = nc * nz * ny * nx dim = [nc, nz, ny, nx] ig = ImageGeometry(voxel_num_x=nx, voxel_num_y=ny, voxel_num_z=nz, channels=nc) arr = numpy.arange(size).reshape(dim).astype(numpy.float32)**2 data = ig.allocate() data.fill(arr) #neumann grad_py = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='numpy') gold_direct = grad_py.direct(data) gold_adjoint = grad_py.adjoint(gold_direct) grad_c = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='c') out_direct = grad_c.direct(data) out_adjoint = grad_c.adjoint(out_direct) #print("GradientOperator, 4D, bnd_cond='Neumann', direct") numpy.testing.assert_array_equal(out_direct.get_item(0).as_array(), gold_direct.get_item(0).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(1).as_array(), gold_direct.get_item(1).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(2).as_array(), gold_direct.get_item(2).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(3).as_array(), gold_direct.get_item(3).as_array()) #print("GradientOperator, 4D, bnd_cond='Neumann', adjoint") numpy.testing.assert_array_equal(out_adjoint.as_array(), gold_adjoint.as_array()) #periodic grad_py = GradientOperator(ig, bnd_cond='Periodic', correlation='SpaceChannels', backend='numpy') gold_direct = grad_py.direct(data) gold_adjoint = grad_py.adjoint(gold_direct) grad_c = GradientOperator(ig, bnd_cond='Periodic', correlation='SpaceChannels', backend='c') out_direct = grad_c.direct(data) out_adjoint = grad_c.adjoint(out_direct) #print("GradientOperator, 4D, bnd_cond='Periodic', direct") numpy.testing.assert_array_equal(out_direct.get_item(0).as_array(), gold_direct.get_item(0).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(1).as_array(), gold_direct.get_item(1).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(2).as_array(), gold_direct.get_item(2).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(3).as_array(), gold_direct.get_item(3).as_array()) #print("GradientOperator, 4D, bnd_cond='Periodic', adjoint") numpy.testing.assert_array_equal(out_adjoint.as_array(), gold_adjoint.as_array()) def test_GradientOperator_4D_allocate(self): nc, nz, ny, nx = 3, 4, 5, 6 size = nc * nz * ny * nx dim = [nc, nz, ny, nx] ig = ImageGeometry(voxel_num_x=nx, voxel_num_y=ny, voxel_num_z=nz, channels=nc) arr = numpy.arange(size).reshape(dim).astype(numpy.float32)**2 data = ig.allocate() data.fill(arr) #numpy grad1 = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='numpy') gold_direct = grad1.direct(data) gold_adjoint = 
grad1.adjoint(gold_direct) grad2 = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='numpy') out_direct = grad2.range_geometry().allocate() out_adjoint = grad2.domain_geometry().allocate() grad2.direct(data, out=out_direct) grad2.adjoint(out_direct, out=out_adjoint) #print("GradientOperatorOperator, 4D, bnd_cond='Neumann', direct") numpy.testing.assert_array_equal(out_direct.get_item(0).as_array(), gold_direct.get_item(0).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(1).as_array(), gold_direct.get_item(1).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(2).as_array(), gold_direct.get_item(2).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(3).as_array(), gold_direct.get_item(3).as_array()) #print("GradientOperator, 4D, bnd_cond='Neumann', adjoint") numpy.testing.assert_array_equal(out_adjoint.as_array(), gold_adjoint.as_array()) #c grad1 = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='c') gold_direct = grad1.direct(data) gold_adjoint = grad1.adjoint(gold_direct) grad2 = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='c') out_direct = grad2.range_geometry().allocate() out_adjoint = grad2.domain_geometry().allocate() grad2.direct(data, out=out_direct) grad2.adjoint(out_direct, out=out_adjoint) #print("GradientOperator, 4D, bnd_cond='Neumann', direct") numpy.testing.assert_array_equal(out_direct.get_item(0).as_array(), gold_direct.get_item(0).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(1).as_array(), gold_direct.get_item(1).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(2).as_array(), gold_direct.get_item(2).as_array()) numpy.testing.assert_array_equal(out_direct.get_item(3).as_array(), gold_direct.get_item(3).as_array()) #print("GradientOperator, 4D, bnd_cond='Neumann', adjoint") numpy.testing.assert_array_equal(out_adjoint.as_array(), gold_adjoint.as_array()) def test_GradientOperator_linearity(self): nc, nz, ny, nx = 3, 4, 5, 6 ig = ImageGeometry(voxel_num_x=nx, voxel_num_y=ny, voxel_num_z=nz, channels=nc) grad = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='c') self.assertTrue(LinearOperator.dot_test(grad)) grad = GradientOperator(ig, bnd_cond='Periodic', correlation='SpaceChannels', backend='c') self.assertTrue(LinearOperator.dot_test(grad)) grad = GradientOperator(ig, bnd_cond='Neumann', correlation='SpaceChannels', backend='numpy') self.assertTrue(LinearOperator.dot_test(grad)) grad = GradientOperator(ig, bnd_cond='Periodic', correlation='SpaceChannels', backend='numpy') self.assertTrue(LinearOperator.dot_test(grad)) def test_Gradient_c_numpy_voxel(self): numpy.random.seed(5) print("Test GradientOperator for 2D Geometry, ") ny, nx, nz = 3, 4, 5 ig = ImageGeometry(voxel_num_y = ny, voxel_num_x = nx, voxel_size_x=0.1, voxel_size_y=0.5) GD_C = GradientOperator(ig, backend = 'c') GD_numpy = GradientOperator(ig, backend = 'numpy') print("Check Gradient_C, Gradient_numpy norms") Gradient_C_norm = GD_C.norm() Gradient_numpy_norm = GD_numpy.norm() print(Gradient_C_norm, Gradient_numpy_norm) numpy.testing.assert_allclose(Gradient_C_norm, Gradient_numpy_norm, rtol=0.1) numpy.testing.assert_allclose(numpy.sqrt((2/ig.voxel_size_x)**2 + (2/ig.voxel_size_y)**2), Gradient_numpy_norm, rtol=0.1) numpy.testing.assert_allclose(numpy.sqrt((2/ig.voxel_size_x)**2 + (2/ig.voxel_size_y)**2), Gradient_C_norm, rtol=0.1) print("Test passed\n") print("Check dot test") 
self.assertTrue(GD_C.dot_test(GD_C)) self.assertTrue(GD_numpy.dot_test(GD_numpy)) print("Test passed\n") print("Check dot test for Gradient Numpy with different method/bdn_cond") G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) print("Test passed\n") print("Test GradientOperator for 2D Geometry passed\n") ########################################################################### ########################################################################### ########################################################################### ########################################################################### print("Test GradientOperator for 3D Geometry, ") ig = ImageGeometry(voxel_num_y = ny, voxel_num_x = nx, voxel_num_z = nz, voxel_size_x=0.1, voxel_size_y=0.5, voxel_size_z = 0.4) GD_C = GradientOperator(ig, backend = 'c') GD_numpy = GradientOperator(ig, backend = 'numpy') numpy.random.seed(5) print("Check Gradient_C, Gradient_numpy norms") Gradient_C_norm = GD_C.norm() Gradient_numpy_norm = GD_numpy.norm() numpy.testing.assert_allclose(Gradient_C_norm, Gradient_numpy_norm, rtol=0.1) numpy.testing.assert_allclose(numpy.sqrt((2/ig.voxel_size_z)**2 + (2/ig.voxel_size_x)**2 + (2/ig.voxel_size_y)**2), Gradient_numpy_norm, rtol=0.1) numpy.testing.assert_allclose(numpy.sqrt((2/ig.voxel_size_z)**2 + (2/ig.voxel_size_x)**2 + (2/ig.voxel_size_y)**2), Gradient_C_norm, rtol=0.1) print("Test passed\n") print("Check dot test") self.assertTrue(GD_C.dot_test(GD_C)) self.assertTrue(GD_numpy.dot_test(GD_numpy)) print("Test passed\n") print("Check dot test for GradientOperator Numpy with different method/bdn_cond") G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) print("Test passed\n") print("Test GradientOperator for 3D Geometry passed\n") ########################################################################### ########################################################################### ########################################################################### ########################################################################### print("Test GradientOperator for 2D Geometry + channels, ") ig = ImageGeometry(5,10, voxel_size_x=0.1, voxel_size_y=0.5, channels = 10) GD_C = 
GradientOperator(ig, backend = 'c') GD_numpy = GradientOperator(ig, backend = 'numpy') print("Check Gradient_C, Gradient_numpy norms") Gradient_C_norm = GD_C.norm() Gradient_numpy_norm = GD_numpy.norm() numpy.testing.assert_allclose(Gradient_C_norm, Gradient_numpy_norm, rtol=0.1) print("Test passed\n") print("Check dot test") self.assertTrue(GD_C.dot_test(GD_C)) self.assertTrue(GD_numpy.dot_test(GD_numpy)) print("Test passed\n") print("Check dot test for GradientOperator Numpy with different method/bdn_cond") G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) print("Test passed\n") print("Test GradientOperator for 2D Geometry + channels passed\n") ########################################################################### ########################################################################### ########################################################################### ########################################################################### print("Test GradientOperator for 3D Geometry + channels, ") ig = ImageGeometry(voxel_num_x = nx, voxel_num_y = ny, voxel_num_z=nz, voxel_size_x=0.1, voxel_size_y=0.5, voxel_size_z = 0.3, channels = 10) GD_C = GradientOperator(ig, backend = 'c') GD_numpy = GradientOperator(ig, backend = 'numpy') print("Check Gradient_C, Gradient_numpy norms") Gradient_C_norm = GD_C.norm() Gradient_numpy_norm = GD_numpy.norm() numpy.testing.assert_allclose(Gradient_C_norm, Gradient_numpy_norm, rtol=0.1) print("Test passed\n") print("Check dot test") self.assertTrue(GD_C.dot_test(GD_C)) self.assertTrue(GD_numpy.dot_test(GD_numpy)) print("Test passed\n") print("Check dot test for Gradient Numpy with different method/bdn_cond") G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Neumann') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'forward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'backward', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) G_numpy1 = GradientOperator(ig, method = 'centered', bnd_cond = 'Periodic') self.assertTrue(G_numpy1.dot_test(G_numpy1)) print("Test passed\n") print("Test GradientOperator for 3D Geometry + channels passed\n")
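All the dot_test assertions above verify the same adjoint identity, <G u, w> = <u, G^T w>, for the gradient operator under each method/boundary-condition combination. A self-contained numpy illustration with a 1-D forward difference and a Neumann boundary, where the adjoint is literally the matrix transpose (this is not CIL code):

# Self-contained illustration of the adjoint identity the dot tests check:
# <G u, w> == <u, G^T w> for a linear operator G. Here G is a 1-D forward
# difference, (G u)[i] = u[i+1] - u[i], with the last row left zero (Neumann),
# built as an explicit matrix so the adjoint is just the transpose.
import numpy

n = 7
G = numpy.zeros((n, n))
for i in range(n - 1):
    G[i, i] = -1.0
    G[i, i + 1] = 1.0

rng = numpy.random.default_rng(0)
u = rng.standard_normal(n)
w = rng.standard_normal(n)

lhs = numpy.dot(G @ u, w)    # <G u, w>
rhs = numpy.dot(u, G.T @ w)  # <u, G^T w>
numpy.testing.assert_allclose(lhs, rhs)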
47.937209
155
0.617232
2,533
20,613
4.783261
0.09317
0.041598
0.051997
0.049521
0.816193
0.767002
0.728458
0.722846
0.719132
0.686118
0
0.023412
0.225004
20,613
429
156
48.048951
0.735023
0.063698
0
0.597786
0
0
0.115617
0
0
0
0
0
0.265683
1
0.01845
false
0.059041
0.03321
0
0.055351
0.180812
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
874f414d42bdf68722b1d1f806967f04771c6c25
13,516
py
Python
tests/nn/architectures/test_encoder_decoder.py
preeti98/sleap
203c3a03c0c54f8dab242611d9a8d24595e98081
[ "BSD-3-Clause-Clear" ]
null
null
null
tests/nn/architectures/test_encoder_decoder.py
preeti98/sleap
203c3a03c0c54f8dab242611d9a8d24595e98081
[ "BSD-3-Clause-Clear" ]
null
null
null
tests/nn/architectures/test_encoder_decoder.py
preeti98/sleap
203c3a03c0c54f8dab242611d9a8d24595e98081
[ "BSD-3-Clause-Clear" ]
null
null
null
import numpy as np
import tensorflow as tf

from sleap.nn.system import use_cpu_only; use_cpu_only()  # hide GPUs for test

from sleap.nn.architectures import encoder_decoder


class EncoderDecoderTests(tf.test.TestCase):
    def test_simple_conv_block(self):
        block = encoder_decoder.SimpleConvBlock(
            pooling_stride=2, num_convs=3, filters=16, kernel_size=3,
            use_bias=True, batch_norm=False, batch_norm_before_activation=True,
            activation="relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 2 * 3 + 1)
        self.assertEqual(len(model.trainable_weights), 6)
        self.assertEqual(model.count_params(), 4800)
        self.assertAllEqual(model.output.shape, (None, 4, 4, 16))

    def test_simple_conv_block_bn(self):
        block = encoder_decoder.SimpleConvBlock(
            pooling_stride=2, num_convs=3, filters=16, kernel_size=3,
            use_bias=True, batch_norm=True, batch_norm_before_activation=True,
            activation="relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3 * 3 + 1)
        self.assertEqual(len(model.trainable_weights), 12)
        self.assertEqual(model.count_params(), 4992)
        self.assertAllEqual(model.output.shape, (None, 4, 4, 16))
        self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.BatchNormalization)
        self.assertIsInstance(model.layers[3], tf.keras.layers.Activation)

    def test_simple_conv_block_bn_post(self):
        block = encoder_decoder.SimpleConvBlock(
            pooling_stride=2, num_convs=3, filters=16, kernel_size=3,
            use_bias=True, batch_norm=True, batch_norm_before_activation=False,
            activation="relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3 * 3 + 1)
        self.assertEqual(len(model.trainable_weights), 12)
        self.assertEqual(model.count_params(), 4992)
        self.assertAllEqual(model.output.shape, (None, 4, 4, 16))
        self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Activation)
        self.assertIsInstance(model.layers[3], tf.keras.layers.BatchNormalization)

    def test_simple_conv_block_no_pool(self):
        block = encoder_decoder.SimpleConvBlock(
            pool=False, pooling_stride=2, num_convs=3, filters=16, kernel_size=3,
            use_bias=True, batch_norm=True, batch_norm_before_activation=True,
            activation="relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3 * 3)
        self.assertEqual(len(model.trainable_weights), 12)
        self.assertEqual(model.count_params(), 4992)
        self.assertAllEqual(model.output.shape, (None, 8, 8, 16))

    def test_simple_conv_block_pool_before_convs(self):
        block = encoder_decoder.SimpleConvBlock(
            pool=True, pool_before_convs=True, pooling_stride=2, num_convs=3,
            filters=16, kernel_size=3, use_bias=True, batch_norm=True,
            batch_norm_before_activation=True, activation="relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3 * 3 + 1)
        self.assertEqual(len(model.trainable_weights), 12)
        self.assertEqual(model.count_params(), 4992)
        self.assertAllEqual(model.output.shape, (None, 4, 4, 16))
        self.assertIsInstance(model.layers[1], tf.keras.layers.MaxPooling2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Conv2D)
        self.assertIsInstance(model.layers[3], tf.keras.layers.BatchNormalization)
        self.assertIsInstance(model.layers[4], tf.keras.layers.Activation)

    def test_simple_upsampling_block(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 2)
        self.assertEqual(len(model.trainable_weights), 0)
        self.assertEqual(model.count_params(), 0)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 1))
        self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D)

    def test_simple_upsampling_block_trans_conv(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = True,
            transposed_conv_filters = 8, transposed_conv_kernel_size = 3,
            transposed_conv_use_bias = True, transposed_conv_batch_norm = True,
            transposed_conv_batch_norm_before_activation = True,
            transposed_conv_activation = "relu", refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3)
        self.assertEqual(len(model.trainable_weights), 4)
        self.assertEqual(model.count_params(), 112)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 8))
        self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2DTranspose)
        self.assertIsInstance(model.layers[2], tf.keras.layers.BatchNormalization)
        self.assertIsInstance(model.layers[3], tf.keras.layers.Activation)

    def test_simple_upsampling_block_trans_conv_bn_post(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = True,
            transposed_conv_filters = 8, transposed_conv_kernel_size = 3,
            transposed_conv_use_bias = True, transposed_conv_batch_norm = True,
            transposed_conv_batch_norm_before_activation = False,
            transposed_conv_activation = "relu", refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 1 + 3)
        self.assertEqual(len(model.trainable_weights), 4)
        self.assertEqual(model.count_params(), 112)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 8))
        self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2DTranspose)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Activation)
        self.assertIsInstance(model.layers[3], tf.keras.layers.BatchNormalization)

    def test_simple_upsampling_block_ignore_skip_source(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = False,
            skip_add = False, refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        skip_src = tf.keras.Input((16, 16, 1))
        x = block.make_block(x_in, skip_source=skip_src)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 2)
        self.assertEqual(len(model.trainable_weights), 0)
        self.assertEqual(model.count_params(), 0)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 1))
        self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D)

    def test_simple_upsampling_block_skip_add(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = True,
            skip_add = True, refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        skip_src = tf.ones((1, 16, 16, 1))
        x = block.make_block(x_in, skip_source=skip_src)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 3)
        self.assertEqual(len(model.trainable_weights), 0)
        self.assertEqual(model.count_params(), 0)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 1))
        self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D)
        self.assertTrue("add" in model.layers[2].name)
        self.assertAllClose(model(tf.ones((1, 8, 8, 1))), tf.ones((1, 16, 16, 1)) * 2)

    def test_simple_upsampling_block_skip_add_adjust_channels(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = True,
            skip_add = True, refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        skip_src = tf.keras.Input((16, 16, 4))
        x = block.make_block(x_in, skip_source=skip_src)
        model = tf.keras.Model([x_in, skip_src], x)

        self.assertEqual(len(model.layers), 5)
        self.assertEqual(len(model.trainable_weights), 2)
        self.assertEqual(model.count_params(), 1+4)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 1))
        self.assertIsInstance(model.layers[3], tf.keras.layers.UpSampling2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Conv2D)
        self.assertTrue("add" in model.layers[4].name)

    def test_simple_upsampling_block_skip_concat(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = True,
            skip_add = False, refine_convs = 0,
        )
        x_in = tf.keras.Input((8, 8, 1))
        skip_src = tf.keras.Input((16, 16, 4))
        x = block.make_block(x_in, skip_source=skip_src)
        model = tf.keras.Model([x_in, skip_src], x)

        self.assertEqual(len(model.layers), 4)
        self.assertEqual(len(model.trainable_weights), 0)
        self.assertEqual(model.count_params(), 0)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 5))
        self.assertIsInstance(model.layers[2], tf.keras.layers.UpSampling2D)
        self.assertIsInstance(model.layers[3], tf.keras.layers.Concatenate)

    def test_simple_upsampling_block_refine_convs(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = True,
            refine_convs = 2, refine_convs_filters = 16,
            refine_convs_use_bias = True, refine_convs_kernel_size = 3,
            refine_convs_batch_norm = True,
            refine_convs_batch_norm_before_activation = True,
            refine_convs_activation = "relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 8)
        self.assertEqual(len(model.trainable_weights), 8)
        self.assertEqual(model.count_params(), 2608)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 16))
        self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Conv2D)
        self.assertIsInstance(model.layers[3], tf.keras.layers.BatchNormalization)
        self.assertIsInstance(model.layers[4], tf.keras.layers.Activation)

    def test_simple_upsampling_block_refine_convs_bn_post(self):
        block = encoder_decoder.SimpleUpsamplingBlock(
            upsampling_stride=2, transposed_conv = False,
            interp_method = "bilinear", skip_connection = True,
            refine_convs = 2, refine_convs_filters = 16,
            refine_convs_use_bias = True, refine_convs_kernel_size = 3,
            refine_convs_batch_norm = True,
            refine_convs_batch_norm_before_activation = False,
            refine_convs_activation = "relu",
        )
        x_in = tf.keras.Input((8, 8, 1))
        x = block.make_block(x_in)
        model = tf.keras.Model(x_in, x)

        self.assertEqual(len(model.layers), 8)
        self.assertEqual(len(model.trainable_weights), 8)
        self.assertEqual(model.count_params(), 2608)
        self.assertAllEqual(model.output.shape, (None, 16, 16, 16))
        self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D)
        self.assertIsInstance(model.layers[2], tf.keras.layers.Conv2D)
        self.assertIsInstance(model.layers[3], tf.keras.layers.Activation)
        self.assertIsInstance(model.layers[4], tf.keras.layers.BatchNormalization)
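# --- Editor's note (hedged sketch, not part of the original file) -----------
# Quick interactive way to inspect one of the blocks tested above; assumes
# the sleap package is importable. model.summary() prints the layer stack and
# the 4992-parameter count asserted in test_simple_conv_block_bn.
import tensorflow as tf
from sleap.nn.architectures import encoder_decoder

block = encoder_decoder.SimpleConvBlock(
    pooling_stride=2, num_convs=3, filters=16, kernel_size=3,
    use_bias=True, batch_norm=True, batch_norm_before_activation=True,
    activation="relu",
)
x_in = tf.keras.Input((8, 8, 1))
model = tf.keras.Model(x_in, block.make_block(x_in))
model.summary()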
41.460123
86
0.628218
1,652
13,516
4.929177
0.06477
0.053297
0.095174
0.118015
0.955299
0.938598
0.903353
0.889721
0.866511
0.858283
0
0.03314
0.261024
13,516
325
87
41.587692
0.782139
0.001332
0
0.782313
0
0
0.007262
0
0
0
0
0
0.306122
1
0.047619
false
0
0.013605
0
0.064626
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
875af798c34837d342aef51870410ce358cba40f
4,398
py
Python
exams/61a-su20-mt-solution/q1/tests/q1.py
jjllzhang/CS61A
57b68c7c06999210d96499f6d84e4ec99085d396
[ "MIT" ]
1
2022-01-22T11:45:01.000Z
2022-01-22T11:45:01.000Z
exams/61a-su20-mt-solution/q1/tests/q1.py
jjllzhang/CS61A
57b68c7c06999210d96499f6d84e4ec99085d396
[ "MIT" ]
null
null
null
exams/61a-su20-mt-solution/q1/tests/q1.py
jjllzhang/CS61A
57b68c7c06999210d96499f6d84e4ec99085d396
[ "MIT" ]
null
null
null
test = {
    'name': 'q1',
    'points': 1,
    'suites': [
        {
            'cases': [
                {
                    'code': '>>> hacker = cat([1,2], 2)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            '\n'
                            '>>> hacker(2)\n'
                            "'Successfully unlocked!'\n"
                            '\n'
                            '>>> hacker = cat([1,2], 1)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            '\n'
                            '>>> hacker(3) # used up attempts to gain access\n'
                            '\n'
                            '>>> hacker(2) # correct attempt to gain access, but already locked\n'
                            "'The safe is now inaccessible!'\n"
                            '\n'
                            '>>> hacker = cat([1,2], 2)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            '\n'
                            '>>> hacker(3) # 1 attempt left to gain access\n'
                            '\n'
                            '>>> hacker(2) # correct attempt to gain access\n'
                            "'Successfully unlocked!'\n"
                }
            ],
            'scored': True,
            'setup': 'from q1 import *',
            'type': 'doctest'
        },
        {
            'cases': [
                {
                    'code': '>>> hacker = cat([1], 4)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            "'The safe is now inaccessible!'\n"
                            '\n'
                            '>>> hacker = cat([1], 4)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            "'The safe is now inaccessible!'\n"
                            '\n'
                            '>>> hacker = cat([1], 4)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            "'Successfully unlocked!'\n"
                            '\n'
                            '>>> hacker = cat([1, 2, 3, 4, 5, 6], 4)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            '\n'
                            '>>> hacker(2)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(4)\n'
                            '\n'
                            '>>> hacker(5)\n'
                            '\n'
                            '>>> hacker(6)\n'
                            "'Successfully unlocked!'\n"
                            '\n'
                            '>>> hacker = cat([1,2,3,4], 2)\n'
                            '\n'
                            '>>> hacker(1)\n'
                            '\n'
                            '>>> hacker(2)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(3)\n'
                            '\n'
                            '>>> hacker(4)\n'
                            "'Successfully unlocked!'\n"
                }
            ],
            'scored': True,
            'setup': 'from q1 import *',
            'type': 'doctest'
        }
    ]
}
46.787234
75
0.181446
261
4,398
3.057471
0.16092
0.090226
0.360902
0.157895
0.893484
0.869674
0.867168
0.867168
0.843358
0.798246
0
0.044572
0.68372
4,398
94
76
46.787234
0.529116
0
0
0.840426
0
0
0.284155
0
0
0
0
0
0
1
0
false
0
0.021277
0
0.021277
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
875eb9374556cff06511afdcf03257d5ec6772af
138
py
Python
server/tests/test_user_api.py
WagnerJM/opus_xml
382fa78473bc37199b142bde0aa14ace31c9cd10
[ "MIT" ]
null
null
null
server/tests/test_user_api.py
WagnerJM/opus_xml
382fa78473bc37199b142bde0aa14ace31c9cd10
[ "MIT" ]
null
null
null
server/tests/test_user_api.py
WagnerJM/opus_xml
382fa78473bc37199b142bde0aa14ace31c9cd10
[ "MIT" ]
null
null
null
import pytest
import requests


def test_user_login():
    pass


def test_user_logout():
    pass


def test_admin_register_user():
    pass
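# --- Editor's note (hedged sketch, not part of the original file) -----------
# The tests above are empty placeholders. A filled-in version might look like
# this; the endpoint URL, payload, and status code are hypothetical and not
# taken from the opus_xml server API.
import requests

def test_user_login_example():
    resp = requests.post(
        "http://localhost:5000/api/login",  # hypothetical endpoint
        json={"username": "alice", "password": "secret"},
    )
    assert resp.status_code == 200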
12.545455
31
0.73913
20
138
4.75
0.55
0.221053
0.231579
0
0
0
0
0
0
0
0
0
0.195652
138
11
32
12.545455
0.855856
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0.375
true
0.375
0.25
0
0.625
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
1
0
0
7
87780d816b6ece42dcf2893dcf0ca717d4177c53
14,984
py
Python
test_area/encode64/encode_img.py
umiphos/python-exercises
a834fb63d3e447e3df096543c0e1850ecc020ffe
[ "Apache-2.0" ]
null
null
null
test_area/encode64/encode_img.py
umiphos/python-exercises
a834fb63d3e447e3df096543c0e1850ecc020ffe
[ "Apache-2.0" ]
null
null
null
test_area/encode64/encode_img.py
umiphos/python-exercises
a834fb63d3e447e3df096543c0e1850ecc020ffe
[ "Apache-2.0" ]
null
null
null
import base64 img_data = b'iVBORw0KGgoAAAANSUhEUgAABoIAAAaCCAYAAAABZu+EAAAqOElEQVR42uzBAQEAAACAkP6v7ggK\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACA2YMDAQAAAAAg\n/9dGUFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
VVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVWkPDgkA\nAAAABP1/7QobAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
A\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAIcAeHkAAeLqlDIAAAAASUVORK5CYII='

fh = open("imageToSave.png", "wb")
# Fixed: bytes.decode('base64') is a Python 2-only codec idiom and fails on
# Python 3; base64.b64decode (from the module imported at the top) is the
# portable equivalent.
fh.write(base64.b64decode(img_data))
fh.close()
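# --- Editor's note (hedged sketch, not part of the original file) -----------
# Round trip behind the fix above: base64.b64encode/b64decode are inverses,
# and both behave the same on Python 2 and 3.
import base64

payload = b"hello"
encoded = base64.b64encode(payload)  # b'aGVsbG8='
assert base64.b64decode(encoded) == payload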
1,362.181818
14,882
0.98445
212
14,984
69.570755
0.113208
1.179877
1.738491
2.276222
0.965828
0.965828
0.965828
0.965828
0.965828
0.965828
0
0.000935
0.001068
14,984
10
14,883
1,498.4
0.984433
0
0
0
0
0.2
0.993992
0.992457
0
1
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
1
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
15
5e5ca6253874950abdffa5822c3cae396cd60b7b
4,984
py
Python
tests/cli/test_merge.py
IRDonch/datumaro
d029e67549b7359c887bd15039997bd8bbae7c0c
[ "MIT" ]
237
2020-09-07T14:29:26.000Z
2022-03-31T12:53:48.000Z
tests/cli/test_merge.py
IRDonch/datumaro
d029e67549b7359c887bd15039997bd8bbae7c0c
[ "MIT" ]
330
2020-09-09T21:27:29.000Z
2022-03-31T12:36:18.000Z
tests/cli/test_merge.py
IRDonch/datumaro
d029e67549b7359c887bd15039997bd8bbae7c0c
[ "MIT" ]
63
2020-09-09T07:44:28.000Z
2022-03-17T16:07:26.000Z
from unittest import TestCase
import os.path as osp

import numpy as np

from datumaro.components.annotation import (
    AnnotationType, Bbox, LabelCategories, MaskCategories,
)
from datumaro.components.extractor import DatasetItem
from datumaro.components.project import Dataset, Project
from datumaro.util.test_utils import TestDir, compare_datasets
from datumaro.util.test_utils import run_datum as run
import datumaro.plugins.voc_format.format as VOC

from ..requirements import Requirements, mark_requirement


class MergeTest(TestCase):
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_run_self_merge(self):
        dataset1 = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 3, label=0),
                ]),
        ], categories=['a', 'b'])

        dataset2 = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 4, label=1),
                    Bbox(5, 6, 2, 3, label=2),
                ]),
        ], categories=['a', 'b', 'c'])

        expected = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 4, label=2, id=1, group=1, attributes={
                        'score': 0.5, 'occluded': False,
                        'difficult': False, 'truncated': False}),
                    Bbox(5, 6, 2, 3, label=3, id=2, group=2, attributes={
                        'score': 0.5, 'occluded': False,
                        'difficult': False, 'truncated': False}),
                    Bbox(1, 2, 3, 3, label=1, id=1, group=1, attributes={
                        'score': 0.5, 'is_crowd': False}),
                ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                ['background', 'a', 'b', 'c']),
            AnnotationType.mask: MaskCategories(VOC.generate_colormap(4))
        })

        with TestDir() as test_dir:
            dataset1_url = osp.join(test_dir, 'dataset1')
            dataset2_url = osp.join(test_dir, 'dataset2')

            dataset1.export(dataset1_url, 'coco', save_images=True)
            dataset2.export(dataset2_url, 'voc', save_images=True)

            proj_dir = osp.join(test_dir, 'proj')
            with Project.init(proj_dir) as project:
                project.import_source('source', dataset2_url, 'voc')

            result_dir = osp.join(test_dir, 'cmp_result')
            run(self, 'merge', dataset1_url + ':coco',
                '-o', result_dir, '-p', proj_dir)

            compare_datasets(self, expected, Dataset.load(result_dir),
                require_images=True)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_run_multimerge(self):
        dataset1 = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 3, label=0),
                ]),
        ], categories=['a', 'b'])

        dataset2 = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 4, label=1),
                    Bbox(5, 6, 2, 3, label=2),
                ]),
        ], categories=['a', 'b', 'c'])

        expected = Dataset.from_iterable([
            DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
                annotations=[
                    Bbox(1, 2, 3, 4, label=2, id=1, group=1, attributes={
                        'score': 0.5, 'occluded': False,
                        'difficult': False, 'truncated': False}),
                    Bbox(5, 6, 2, 3, label=3, id=2, group=2, attributes={
                        'score': 0.5, 'occluded': False,
                        'difficult': False, 'truncated': False}),
                    Bbox(1, 2, 3, 3, label=1, id=1, group=1, attributes={
                        'score': 0.5, 'is_crowd': False}),
                ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                ['background', 'a', 'b', 'c']),
            AnnotationType.mask: MaskCategories(VOC.generate_colormap(4))
        })

        with TestDir() as test_dir:
            dataset1_url = osp.join(test_dir, 'dataset1')
            dataset2_url = osp.join(test_dir, 'dataset2')

            dataset1.export(dataset1_url, 'coco', save_images=True)
            dataset2.export(dataset2_url, 'voc', save_images=True)

            result_dir = osp.join(test_dir, 'cmp_result')
            run(self, 'merge', dataset2_url + ':voc', dataset1_url + ':coco',
                '-o', result_dir)

            compare_datasets(self, expected, Dataset.load(result_dir),
                require_images=True)
41.533333
77
0.535112
555
4,984
4.675676
0.178378
0.009249
0.018497
0.02158
0.806166
0.799615
0.757611
0.757611
0.757611
0.757611
0
0.045306
0.326846
4,984
119
78
41.882353
0.728167
0
0
0.78
0
0
0.065209
0
0
0
0
0
0
1
0.02
false
0
0.11
0
0.14
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
5e68cda50834de53b037be375850c97af8aa7320
3,261
py
Python
labman/db/tests/test_configuration_manager.py
antgonza/labman
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
[ "BSD-3-Clause" ]
null
null
null
labman/db/tests/test_configuration_manager.py
antgonza/labman
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
[ "BSD-3-Clause" ]
null
null
null
labman/db/tests/test_configuration_manager.py
antgonza/labman
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
[ "BSD-3-Clause" ]
null
null
null
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------

from unittest import main, TestCase
from tempfile import NamedTemporaryFile

from labman.db.configuration_manager import ConfigurationManager


class TestConfigurationManager(TestCase):
    def test_create(self):
        with NamedTemporaryFile() as tmp_f:
            ConfigurationManager.create(
                tmp_f.name, True, '/path/to/server.cert',
                '/path/to/server.key', '/path/to/cookie_secret.bla',
                'db_host', 'db_port', 'db_name', 'db_user', 'db_password',
                'db_admin_user', 'db_admin_password', '/path/to/logdir', '')
            with open(tmp_f.name) as obs_f:
                obs = obs_f.read()
        obs = obs.splitlines()
        exp = EXP_CONFIG_FILE.splitlines()
        # Removing the first line as it contains a date that is generated
        # when the test is run
        self.assertEqual(obs[1:], exp)

    def test_create_qiita(self):
        with NamedTemporaryFile() as tmp_f:
            ConfigurationManager.create(
                tmp_f.name, True, '/path/to/server.cert',
                '/path/to/server.key', '/path/to/cookie_secret.bla',
                'db_host', 'db_port', 'db_name', 'db_user', 'db_password',
                'db_admin_user', 'db_admin_password', '/path/to/logdir',
                'server_cert')
            with open(tmp_f.name) as obs_f:
                obs = obs_f.read()
        obs = obs.splitlines()
        exp = EXP_CONFIG_FILE_QIITA.splitlines()
        # Removing the first line as it contains a date that is generated
        # when the test is run
        self.assertEqual(obs[1:], exp)


EXP_CONFIG_FILE = """
# ------------------------- MAIN SETTINGS ----------------------------------
[main]
TEST_ENVIRONMENT=True
LOG_DIR=/path/to/logdir
CERTIFICATE_FILEPATH=/path/to/server.cert
KEY_FILEPATH=/path/to/server.key
COOKIE_SECRET=/path/to/cookie_secret.bla

# ----------------------- POSTGRES SETTINGS --------------------------------
[postgres]
USER=db_user
PASSWORD=db_password
ADMIN_USER=db_admin_user
ADMIN_PASSWORD=db_admin_password
DATABASE=db_name
HOST=db_host
PORT=db_port

# ------------------------- QIITA SETTINGS ----------------------------------
[qiita]
SERVER_CERT=
"""

EXP_CONFIG_FILE_QIITA = """
# ------------------------- MAIN SETTINGS ----------------------------------
[main]
TEST_ENVIRONMENT=True
LOG_DIR=/path/to/logdir
CERTIFICATE_FILEPATH=/path/to/server.cert
KEY_FILEPATH=/path/to/server.key
COOKIE_SECRET=/path/to/cookie_secret.bla

# ----------------------- POSTGRES SETTINGS --------------------------------
[postgres]
USER=db_user
PASSWORD=db_password
ADMIN_USER=db_admin_user
ADMIN_PASSWORD=db_admin_password
DATABASE=db_name
HOST=db_host
PORT=db_port

# ------------------------- QIITA SETTINGS ----------------------------------
[qiita]
SERVER_CERT=server_cert
"""

if __name__ == '__main__':
    main()
30.764151
79
0.560871
367
3,261
4.73842
0.234332
0.055204
0.055204
0.036803
0.768258
0.768258
0.768258
0.768258
0.768258
0.768258
0
0.002293
0.197485
3,261
105
80
31.057143
0.662209
0.153021
0
0.756757
0
0
0.534545
0.308
0
0
0
0
0.027027
1
0.027027
false
0.108108
0.040541
0
0.081081
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
5e753f50df857f0a133dd97579a32997593132a9
131
py
Python
engine/gui/__init__.py
UnidayStudio/Easy-2D-Game-Engine
1a8501cba538d7542b0e24bf64eead388085480f
[ "MIT" ]
8
2019-12-15T22:32:30.000Z
2021-06-14T07:38:51.000Z
engine/gui/__init__.py
UnidayStudio/Easy-2D-Game-Engine
1a8501cba538d7542b0e24bf64eead388085480f
[ "MIT" ]
null
null
null
engine/gui/__init__.py
UnidayStudio/Easy-2D-Game-Engine
1a8501cba538d7542b0e24bf64eead388085480f
[ "MIT" ]
2
2020-09-10T17:34:23.000Z
2021-03-11T09:26:26.000Z
from engine.gui.canvas import *
from engine.gui.button import *
from engine.gui.textbox import *
from engine.gui.healthbar import *
32.75
34
0.793893
20
131
5.2
0.4
0.384615
0.5
0.548077
0
0
0
0
0
0
0
0
0.114504
131
4
34
32.75
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
5e892658b84fdb156845395d1e238c94bcc5818e
22,551
py
Python
PMoE/model/augmenter.py
iasbs-isg/PMoE
37a273ff393e4f43c38c7fff9271218efe1d3bd1
[ "MIT" ]
null
null
null
PMoE/model/augmenter.py
iasbs-isg/PMoE
37a273ff393e4f43c38c7fff9271218efe1d3bd1
[ "MIT" ]
1
2021-09-29T18:46:45.000Z
2021-09-29T18:46:45.000Z
PMoE/model/augmenter.py
iasbs-isg/PMoE
37a273ff393e4f43c38c7fff9271218efe1d3bd1
[ "MIT" ]
1
2021-09-29T18:50:12.000Z
2021-09-29T18:50:12.000Z
#######################################################################################################
# This file is borrowed from COiLTRAiNE https://github.com/felipecode/coiltraine by Felipe Codevilla  #
# COiLTRAiNE itself is under MIT License                                                              #
#######################################################################################################
from PIL import Image
from typing import Optional

import numpy as np
import torch
from imgaug import augmenters as iaa

DEBUG = False


def get_augmenter(
    iteration: Optional[int] = 1, bsz: Optional[int] = 32, aug_type: str = "medium"
):
    if aug_type == "medium":
        return medium(iteration, bsz)
    elif aug_type == "soft":
        return soft(iteration, bsz)
    elif aug_type == "high":
        return high(iteration, bsz)
    elif aug_type == "medium_harder":
        return medium_harder(iteration, bsz)
    elif aug_type == "super_hard":
        return super_hard(iteration, bsz)
    elif aug_type == "custom":
        return custom(iteration, bsz)
    elif aug_type == "soft_harder":
        return soft_harder(iteration, bsz)
    elif aug_type == "segmentation":
        return seg_aug()
    else:
        raise ValueError(
            "Unknown augmentation, value should be one of"
            "'medium', 'high', 'medium_harder', 'super_hard', 'soft_harder', 'custom'"
        )


class Crop:
    def __init__(self, crop_size):
        self.top = crop_size[0]
        self.bottom = crop_size[1]

    def __call__(self, img):
        return Image.fromarray(img[self.top : -self.bottom])


class MaskPILToTensor:
    def __call__(self, img):
        return torch.from_numpy(np.array(img)).long()


def seg_aug():
    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(0.3, iaa.GaussianBlur()),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(0.3, iaa.AdditiveGaussianNoise(per_channel=True)),  # add gaussian noise to images
            iaa.Sometimes(0.1, iaa.CoarseDropout(size_percent=(0.08, 0.2), per_channel=True)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(0.1, iaa.Dropout(per_channel=True)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(0.2, iaa.LinearContrast(per_channel=True)),  # improve or worsen the contrast
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def medium(image_iteration: int = 1, bsz: int = 32):
    iteration = image_iteration / (bsz * 1.5)
    frequency_factor = 0.05 + float(iteration) / 1000000.0
    color_factor = float(iteration) / 1000000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 100000.0)
    add_factor = 10 + 10 * iteration / 150000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 500000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 500000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 500000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 500000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def soft(image_iteration: int = 1, bsz: int = 32):
    iteration = image_iteration / (bsz * 1.5)
    frequency_factor = 0.05 + float(iteration) / 1200000.0
    color_factor = float(iteration) / 1200000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 120000.0)
    add_factor = 10 + 10 * iteration / 170000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 800000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 800000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 800000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 800000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def high(image_iteration: int = 1, bsz: int = 32):
    iteration = image_iteration / (bsz * 1.5)
    frequency_factor = 0.05 + float(iteration) / 800000.0
    color_factor = float(iteration) / 800000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 80000.0)
    add_factor = 10 + 10 * iteration / 120000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 350000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 400000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 350000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 400000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def medium_harder(image_iteration: int = 1, bsz: int = 32):
    iteration = image_iteration / bsz
    frequency_factor = 0.05 + float(iteration) / 1000000.0
    color_factor = float(iteration) / 1000000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 100000.0)
    add_factor = 10 + 10 * iteration / 150000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 500000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 500000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 500000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 500000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def super_hard(image_iteration: int = 1, bsz: int = 32):
    """modified"""
    iteration = image_iteration / bsz
    frequency_factor = min(0.05 + float(iteration) / 50000.0, 1.0)
    color_factor = float(iteration) / 100000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 100000.0)
    add_factor = 10 + 10 * iteration / 100000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 200000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 500000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 500000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 500000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            # iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def custom(image_iteration: int = 1, bsz: int = 32):
    """modified"""
    iteration = image_iteration / bsz
    frequency_factor = min(0.05 + float(iteration) / 50000.0, 1.0)
    color_factor = float(iteration) / 100000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 20000.0)
    # add_factor = 10 + 10 * iteration / 100000.0
    # print (add_factor)
    # multiply_factor_pos = 1 + (2.5 * iteration / 300000.0)
    # multiply_factor_neg = 1 - (0.91 * iteration / 300000.0)
    #
    # contrast_factor_pos = 1 + (0.2 * iteration / 500000.0)
    # contrast_factor_neg = 1 - (0.5 * iteration / 500000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),
            # # add gaussian noise to images
            # iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(
            #     0.08, 0.2), per_channel=color_factor)),
            # # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-30, 30), per_channel=False)),
            # # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((0.9, 1.3), per_channel=True)),
            # # change brightness of images (X-Y% of original value)
            # iaa.Sometimes(frequency_factor, iaa.LinearContrast((0.1,0.5),
            #                                                    per_channel=True)),  # improve or worsen the contrast
            # iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter


def soft_harder(image_iteration: int = 1, bsz: int = 32):
    iteration = image_iteration / bsz
    frequency_factor = 0.05 + float(iteration) / 1200000.0
    color_factor = float(iteration) / 1200000.0
    dropout_factor = 0.198667 + (0.03856658 - 0.198667) / (
        1 + (iteration / 196416.6) ** 1.863486
    )
    blur_factor = 0.5 + (0.5 * iteration / 120000.0)
    add_factor = 10 + 10 * iteration / 170000.0
    multiply_factor_pos = 1 + (2.5 * iteration / 800000.0)
    multiply_factor_neg = 1 - (0.91 * iteration / 800000.0)
    contrast_factor_pos = 1 + (0.5 * iteration / 800000.0)
    contrast_factor_neg = 1 - (0.5 * iteration / 800000.0)

    if DEBUG:
        print(
            f"Augment Status: {frequency_factor = }, {color_factor = }, {dropout_factor = }, "
            f"{blur_factor = }, {add_factor = }, "
            f"{multiply_factor_pos = }, {multiply_factor_neg = }, "
            f"{contrast_factor_pos = }, {contrast_factor_neg = }"
        )

    augmenter = iaa.Sequential(
        [
            iaa.Sometimes(frequency_factor, iaa.GaussianBlur((0, blur_factor))),  # blur images with a sigma between 0 and 1.5
            iaa.Sometimes(frequency_factor, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, dropout_factor), per_channel=color_factor)),  # add gaussian noise to images
            iaa.Sometimes(frequency_factor, iaa.CoarseDropout((0.0, dropout_factor), size_percent=(0.08, 0.2), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Dropout((0.0, dropout_factor), per_channel=color_factor)),  # randomly remove up to X% of the pixels
            iaa.Sometimes(frequency_factor, iaa.Add((-add_factor, add_factor), per_channel=color_factor)),  # change brightness of images (by -X to Y of original value)
            iaa.Sometimes(frequency_factor, iaa.Multiply((multiply_factor_neg, multiply_factor_pos), per_channel=color_factor)),  # change brightness of images (X-Y% of original value)
            iaa.Sometimes(frequency_factor, iaa.LinearContrast((contrast_factor_neg, contrast_factor_pos), per_channel=color_factor)),  # improve or worsen the contrast
            iaa.Sometimes(frequency_factor, iaa.Grayscale((0.0, 1))),  # put grayscale
        ],
        random_order=True,  # do all of the above in random order
    )
    return augmenter
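# --- Editor's note (hedged usage sketch, not part of the original file) -----
# Feeding a batch through one of the schedules above; the batch shape is an
# assumption. imgaug Sequential objects are callable on uint8 image batches.
import numpy as np

aug = get_augmenter(iteration=1000, bsz=32, aug_type="medium")
batch = np.random.randint(0, 255, size=(4, 88, 200, 3), dtype=np.uint8)
batch_aug = aug(images=batch)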
36.787928
103
0.540952
2,477
22,551
4.733952
0.067017
0.089545
0.10029
0.128944
0.920092
0.904059
0.890841
0.885298
0.860737
0.860737
0
0.070229
0.350273
22,551
612
104
36.848039
0.730071
0.156179
0
0.79386
0
0
0.084425
0
0
0
0
0
0
1
0.026316
false
0
0.010965
0.004386
0.08114
0.015351
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0d6fdbe09d2eb0915ca9be11f7895c6f1a54a098
31,860
py
Python
pyteal/compiler/compiler_test.py
CiottiGiorgio/pyteal
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
[ "MIT" ]
null
null
null
pyteal/compiler/compiler_test.py
CiottiGiorgio/pyteal
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
[ "MIT" ]
1
2022-03-04T14:57:57.000Z
2022-03-04T14:57:57.000Z
pyteal/compiler/compiler_test.py
CiottiGiorgio/pyteal
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
[ "MIT" ]
null
null
null
import pytest

import pyteal as pt


def test_compile_single():
    expr = pt.Int(1)

    expected = """
#pragma version 2
int 1
return
""".strip()
    actual_application = pt.compileTeal(expr, pt.Mode.Application)
    actual_signature = pt.compileTeal(expr, pt.Mode.Signature)

    assert actual_application == actual_signature
    assert actual_application == expected


def test_compile_sequence():
    expr = pt.Seq([pt.Pop(pt.Int(1)), pt.Pop(pt.Int(2)), pt.Int(3) + pt.Int(4)])

    expected = """
#pragma version 2
int 1
pop
int 2
pop
int 3
int 4
+
return
""".strip()
    actual_application = pt.compileTeal(expr, pt.Mode.Application)
    actual_signature = pt.compileTeal(expr, pt.Mode.Signature)

    assert actual_application == actual_signature
    assert actual_application == expected


def test_compile_branch():
    expr = pt.If(pt.Int(1)).Then(pt.Int(2)).Else(pt.Int(3))

    expected = """
#pragma version 2
int 1
bnz main_l2
int 3
b main_l3
main_l2:
int 2
main_l3:
return
""".strip()
    actual_application = pt.compileTeal(expr, pt.Mode.Application)
    actual_signature = pt.compileTeal(expr, pt.Mode.Signature)

    assert actual_application == actual_signature
    assert actual_application == expected


def test_compile_branch_multiple():
    expr = (
        pt.If(pt.Int(1))
        .Then(pt.Int(2))
        .ElseIf(pt.Int(3))
        .Then(pt.Int(4))
        .Else(pt.Int(5))
    )

    expected = """
#pragma version 2
int 1
bnz main_l4
int 3
bnz main_l3
int 5
b main_l5
main_l3:
int 4
b main_l5
main_l4:
int 2
main_l5:
return
""".strip()
    actual_application = pt.compileTeal(expr, pt.Mode.Application)
    actual_signature = pt.compileTeal(expr, pt.Mode.Signature)

    assert actual_application == actual_signature
    assert actual_application == expected


def test_empty_branch():
    program = pt.Seq(
        [
            pt.If(pt.Txn.application_id() == pt.Int(0)).Then(pt.Seq()),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 5
txn ApplicationID
int 0
==
bnz main_l1
main_l1:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=5, assembleConstants=False
    )
    assert actual == expected


def test_compile_mode():
    expr = pt.App.globalGet(pt.Bytes("key"))

    expected = """
#pragma version 2
byte "key"
app_global_get
return
""".strip()
    actual_application = pt.compileTeal(expr, pt.Mode.Application)

    assert actual_application == expected

    with pytest.raises(pt.TealInputError):
        pt.compileTeal(expr, pt.Mode.Signature)


def test_compile_version_invalid():
    expr = pt.Int(1)

    with pytest.raises(pt.TealInputError):
        pt.compileTeal(expr, pt.Mode.Signature, version=1)  # too small

    with pytest.raises(pt.TealInputError):
        pt.compileTeal(expr, pt.Mode.Signature, version=7)  # too large

    with pytest.raises(pt.TealInputError):
        pt.compileTeal(expr, pt.Mode.Signature, version=2.0)  # decimal


def test_compile_version_2():
    expr = pt.Int(1)

    expected = """
#pragma version 2
int 1
return
""".strip()
    actual = pt.compileTeal(expr, pt.Mode.Signature, version=2)
    assert actual == expected


def test_compile_version_default():
    expr = pt.Int(1)

    actual_default = pt.compileTeal(expr, pt.Mode.Signature)
    actual_version_2 = pt.compileTeal(expr, pt.Mode.Signature, version=2)
    assert actual_default == actual_version_2


def test_compile_version_3():
    expr = pt.Int(1)

    expected = """
#pragma version 3
int 1
return
""".strip()
    actual = pt.compileTeal(expr, pt.Mode.Signature, version=3)
    assert actual == expected


def test_compile_version_4():
    expr = pt.Int(1)

    expected = """
#pragma version 4
int 1
return
""".strip()
    actual = pt.compileTeal(expr, pt.Mode.Signature, version=4)
    assert actual == expected


def test_compile_version_5():
    expr = pt.Int(1)

    expected = """
#pragma version 5
int 1
return
""".strip()
    actual = pt.compileTeal(expr, pt.Mode.Signature, version=5)
    assert actual == expected


def test_compile_version_6():
    expr = pt.Int(1)

    expected = """
#pragma version 6
int 1
return
""".strip()
    actual = pt.compileTeal(expr, pt.Mode.Signature, version=6)
    assert actual == expected


def test_slot_load_before_store():
    program = pt.AssetHolding.balance(pt.Int(0), pt.Int(0)).value()
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2)

    program = pt.AssetHolding.balance(pt.Int(0), pt.Int(0)).hasValue()
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2)

    program = pt.App.globalGetEx(pt.Int(0), pt.Bytes("key")).value()
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2)

    program = pt.App.globalGetEx(pt.Int(0), pt.Bytes("key")).hasValue()
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2)

    program = pt.ScratchVar().load()
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2)


def test_assign_scratch_slots():
    myScratch = pt.ScratchVar(pt.TealType.uint64)
    otherScratch = pt.ScratchVar(pt.TealType.uint64, 1)
    anotherScratch = pt.ScratchVar(pt.TealType.uint64, 0)
    lastScratch = pt.ScratchVar(pt.TealType.uint64)
    prog = pt.Seq(
        [
            myScratch.store(pt.Int(5)),  # Slot 2
            otherScratch.store(pt.Int(0)),  # Slot 1
            anotherScratch.store(pt.Int(7)),  # Slot 0
            lastScratch.store(pt.Int(9)),  # Slot 3
            pt.Approve(),
        ]
    )

    expected = """
#pragma version 4
int 5
store 2
int 0
store 1
int 7
store 0
int 9
store 3
int 1
return
""".strip()
    actual = pt.compileTeal(prog, mode=pt.Mode.Signature, version=4)
    assert actual == expected


def test_scratchvar_double_assign_invalid():
    myvar = pt.ScratchVar(pt.TealType.uint64, 10)
    otherVar = pt.ScratchVar(pt.TealType.uint64, 10)
    prog = pt.Seq([myvar.store(pt.Int(5)), otherVar.store(pt.Int(0)), pt.Approve()])
    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(prog, mode=pt.Mode.Signature, version=4)


def test_assembleConstants():
    program = pt.Itob(pt.Int(1) + pt.Int(1) + pt.Tmpl.Int("TMPL_VAR")) == pt.Concat(
        pt.Bytes("test"), pt.Bytes("test"), pt.Bytes("test2")
    )

    expectedNoAssemble = """
#pragma version 3
int 1
int 1
+
int TMPL_VAR
+
itob
byte "test"
byte "test"
concat
byte "test2"
concat
==
return
""".strip()
    actualNoAssemble = pt.compileTeal(
        program, pt.Mode.Application, version=3, assembleConstants=False
    )
    assert expectedNoAssemble == actualNoAssemble

    expectedAssemble = """
#pragma version 3
intcblock 1
bytecblock 0x74657374
intc_0 // 1
intc_0 // 1
+
pushint TMPL_VAR // TMPL_VAR
+
itob
bytec_0 // "test"
bytec_0 // "test"
concat
pushbytes 0x7465737432 // "test2"
concat
==
return
""".strip()
    actualAssemble = pt.compileTeal(
        program, pt.Mode.Application, version=3, assembleConstants=True
    )
    assert expectedAssemble == actualAssemble

    with pytest.raises(pt.TealInternalError):
        pt.compileTeal(program, pt.Mode.Application, version=2, assembleConstants=True)


def test_compile_while():
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(2)).Do(pt.Seq([i.store(i.load() + pt.Int(1))])),
            pt.Approve(),
        ]
    )

    expected = """
#pragma version 4
int 0
store 0
main_l1:
load 0
int 2
<
bz main_l3
load 0
int 1
+
store 0
b main_l1
main_l3:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    # nested
    i = pt.ScratchVar()
    j = pt.ScratchVar()

    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(2)).Do(
                pt.Seq(
                    [
                        j.store(pt.Int(0)),
                        pt.While(j.load() < pt.Int(5)).Do(
                            pt.Seq([j.store(j.load() + pt.Int(1))])
                        ),
                        i.store(i.load() + pt.Int(1)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 2
<
bz main_l6
int 0
store 1
main_l3:
load 1
int 5
<
bnz main_l5
load 0
int 1
+
store 0
b main_l1
main_l5:
load 1
int 1
+
store 1
b main_l3
main_l6:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual


def test_compile_for():
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            pt.For(
                i.store(pt.Int(0)), i.load() < pt.Int(10), i.store(i.load() + pt.Int(1))
            ).Do(pt.Seq([pt.App.globalPut(pt.Itob(i.load()), i.load() * pt.Int(2))])),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l3
load 0
itob
load 0
int 2
*
app_global_put
load 0
int 1
+
store 0
b main_l1
main_l3:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    # nested
    i = pt.ScratchVar()
    j = pt.ScratchVar()
    program = pt.Seq(
        [
            pt.For(
                i.store(pt.Int(0)), i.load() < pt.Int(10), i.store(i.load() + pt.Int(1))
            ).Do(
                pt.Seq(
                    [
                        pt.For(
                            j.store(pt.Int(0)),
                            j.load() < pt.Int(4),
                            j.store(j.load() + pt.Int(2)),
                        ).Do(
                            pt.Seq(
                                [
                                    pt.App.globalPut(
                                        pt.Itob(j.load()), j.load() * pt.Int(2)
                                    )
                                ]
                            )
                        )
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l6
int 0
store 1
main_l3:
load 1
int 4
<
bnz main_l5
load 0
int 1
+
store 0
b main_l1
main_l5:
load 1
itob
load 1
int 2
*
app_global_put
load 1
int 2
+
store 1
b main_l3
main_l6:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual


def test_compile_break():
    # pt.While
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(3)).Do(
                pt.Seq(
                    [
                        pt.If(i.load() == pt.Int(2), pt.Break()),
                        i.store(i.load() + pt.Int(1)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 3
<
bz main_l4
load 0
int 2
==
bnz main_l4
load 0
int 1
+
store 0
b main_l1
main_l4:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    # pt.For
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            pt.For(
                i.store(pt.Int(0)), i.load() < pt.Int(10), i.store(i.load() + pt.Int(1))
            ).Do(
                pt.Seq(
                    [
                        pt.If(i.load() == pt.Int(4), pt.Break()),
                        pt.App.globalPut(pt.Itob(i.load()), i.load() * pt.Int(2)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l4
load 0
int 4
==
bnz main_l4
load 0
itob
load 0
int 2
*
app_global_put
load 0
int 1
+
store 0
b main_l1
main_l4:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual


def test_compile_continue():
    # pt.While
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(3)).Do(
                pt.Seq(
                    [
                        pt.If(i.load() == pt.Int(2), pt.Continue()),
                        i.store(i.load() + pt.Int(1)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 3
<
bz main_l4
load 0
int 2
==
bnz main_l1
load 0
int 1
+
store 0
b main_l1
main_l4:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    # pt.While
    program = pt.Seq(
        i.store(pt.Int(0)),
        pt.While(i.load() < pt.Int(30)).Do(
            pt.Seq(
                i.store(i.load() + pt.Int(1)),
                pt.Continue(),
            )
        ),
        pt.Return(pt.Int(1)),
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 30
<
bz main_l3
load 0
int 1
+
store 0
b main_l1
main_l3:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    # pt.For
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            pt.For(
                i.store(pt.Int(0)), i.load() < pt.Int(10), i.store(i.load() + pt.Int(1))
            ).Do(
                pt.Seq(
                    [
                        pt.If(i.load() == pt.Int(4), pt.Continue()),
                        pt.App.globalPut(pt.Itob(i.load()), i.load() * pt.Int(2)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l5
load 0
int 4
==
bnz main_l4
load 0
itob
load 0
int 2
*
app_global_put
main_l4:
load 0
int 1
+
store 0
b main_l1
main_l5:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual


def test_compile_continue_break_nested():
    i = pt.ScratchVar()
    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(10)).Do(
                pt.Seq(
                    [
                        i.store(i.load() + pt.Int(1)),
                        pt.If(i.load() < pt.Int(4), pt.Continue(), pt.Break()),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l3
load 0
int 1
+
store 0
load 0
int 4
<
bnz main_l1
main_l3:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual

    i = pt.ScratchVar()
    program = pt.Seq(
        [
            i.store(pt.Int(0)),
            pt.While(i.load() < pt.Int(10)).Do(
                pt.Seq(
                    [
                        pt.If(i.load() == pt.Int(8), pt.Break()),
                        pt.While(i.load() < pt.Int(6)).Do(
                            pt.Seq(
                                [
                                    pt.If(i.load() == pt.Int(3), pt.Break()),
                                    i.store(i.load() + pt.Int(1)),
                                ]
                            )
                        ),
                        pt.If(i.load() < pt.Int(5), pt.Continue()),
                        i.store(i.load() + pt.Int(1)),
                    ]
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
int 0
store 0
main_l1:
load 0
int 10
<
bz main_l8
load 0
int 8
==
bnz main_l8
main_l3:
load 0
int 6
<
bnz main_l6
main_l4:
load 0
int 5
<
bnz main_l1
load 0
int 1
+
store 0
b main_l1
main_l6:
load 0
int 3
==
bnz main_l4
load 0
int 1
+
store 0
b main_l3
main_l8:
int 1
return
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert expected == actual


def test_compile_subroutine_unsupported():
    @pt.Subroutine(pt.TealType.none)
    def storeValue(value: pt.Expr) -> pt.Expr:
        return pt.App.globalPut(pt.Bytes("key"), value)

    program = pt.Seq(
        [
            pt.If(pt.Txn.sender() == pt.Global.creator_address()).Then(
                storeValue(pt.Txn.application_args[0])
            ),
            pt.Approve(),
        ]
    )

    with pytest.raises(pt.TealInputError):
        pt.compileTeal(program, pt.Mode.Application, version=3)


def test_compile_subroutine_no_return():
    @pt.Subroutine(pt.TealType.none)
    def storeValue(value: pt.Expr) -> pt.Expr:
        return pt.App.globalPut(pt.Bytes("key"), value)

    program = pt.Seq(
        [
            pt.If(pt.Txn.sender() == pt.Global.creator_address()).Then(
                storeValue(pt.Txn.application_args[0])
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
txn Sender
global CreatorAddress
==
bz main_l2
txna ApplicationArgs 0
callsub storeValue_0
main_l2:
int 1
return

// storeValue
storeValue_0:
store 0
byte "key"
load 0
app_global_put
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_with_return():
    @pt.Subroutine(pt.TealType.none)
    def storeValue(value: pt.Expr) -> pt.Expr:
        return pt.App.globalPut(pt.Bytes("key"), value)

    @pt.Subroutine(pt.TealType.bytes)
    def getValue() -> pt.Expr:
        return pt.App.globalGet(pt.Bytes("key"))

    program = pt.Seq(
        [
            pt.If(pt.Txn.sender() == pt.Global.creator_address()).Then(
                storeValue(pt.Txn.application_args[0])
            ),
            pt.If(getValue() == pt.Bytes("fail")).Then(pt.Reject()),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
txn Sender
global CreatorAddress
==
bnz main_l3
main_l1:
callsub getValue_1
byte "fail"
==
bz main_l4
int 0
return
main_l3:
txna ApplicationArgs 0
callsub storeValue_0
b main_l1
main_l4:
int 1
return

// storeValue
storeValue_0:
store 0
byte "key"
load 0
app_global_put
retsub

// getValue
getValue_1:
byte "key"
app_global_get
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_many_args():
    @pt.Subroutine(pt.TealType.uint64)
    def calculateSum(
        a1: pt.Expr, a2: pt.Expr, a3: pt.Expr, a4: pt.Expr, a5: pt.Expr, a6: pt.Expr
    ) -> pt.Expr:
        return a1 + a2 + a3 + a4 + a5 + a6

    program = pt.Return(
        calculateSum(pt.Int(1), pt.Int(2), pt.Int(3), pt.Int(4), pt.Int(5), pt.Int(6))
        == pt.Int(1 + 2 + 3 + 4 + 5 + 6)
    )

    expected = """#pragma version 4
int 1
int 2
int 3
int 4
int 5
int 6
callsub calculateSum_0
int 21
==
return

// calculateSum
calculateSum_0:
store 5
store 4
store 3
store 2
store 1
store 0
load 0
load 1
+
load 2
+
load 3
+
load 4
+
load 5
+
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_recursive():
    @pt.Subroutine(pt.TealType.uint64)
    def isEven(i: pt.Expr) -> pt.Expr:
        return (
            pt.If(i == pt.Int(0))
            .Then(pt.Int(1))
            .ElseIf(i == pt.Int(1))
            .Then(pt.Int(0))
            .Else(isEven(i - pt.Int(2)))
        )

    program = pt.Return(isEven(pt.Int(6)))

    expected = """#pragma version 4
int 6
callsub isEven_0
return

// isEven
isEven_0:
store 0
load 0
int 0
==
bnz isEven_0_l4
load 0
int 1
==
bnz isEven_0_l3
load 0
int 2
-
load 0
dig 1
callsub isEven_0
swap
store 0
swap
pop
b isEven_0_l5
isEven_0_l3:
int 0
b isEven_0_l5
isEven_0_l4:
int 1
isEven_0_l5:
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_recursive_5():
    @pt.Subroutine(pt.TealType.uint64)
    def isEven(i: pt.Expr) -> pt.Expr:
        return (
            pt.If(i == pt.Int(0))
            .Then(pt.Int(1))
            .ElseIf(i == pt.Int(1))
            .Then(pt.Int(0))
            .Else(isEven(i - pt.Int(2)))
        )

    program = pt.Return(isEven(pt.Int(6)))

    expected = """#pragma version 5
int 6
callsub isEven_0
return

// isEven
isEven_0:
store 0
load 0
int 0
==
bnz isEven_0_l4
load 0
int 1
==
bnz isEven_0_l3
load 0
int 2
-
load 0
swap
callsub isEven_0
swap
store 0
b isEven_0_l5
isEven_0_l3:
int 0
b isEven_0_l5
isEven_0_l4:
int 1
isEven_0_l5:
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=5, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_recursive_multiple_args():
    @pt.Subroutine(pt.TealType.uint64)
    def multiplyByAdding(a, b):
        return (
            pt.If(a == pt.Int(0))
            .Then(pt.Return(pt.Int(0)))
            .Else(pt.Return(b + multiplyByAdding(a - pt.Int(1), b)))
        )

    program = pt.Return(multiplyByAdding(pt.Int(3), pt.Int(5)))

    expected = """#pragma version 4
int 3
int 5
callsub multiplyByAdding_0
return

// multiplyByAdding
multiplyByAdding_0:
store 1
store 0
load 0
int 0
==
bnz multiplyByAdding_0_l2
load 1
load 0
int 1
-
load 1
load 0
load 1
dig 3
dig 3
callsub multiplyByAdding_0
store 0
store 1
load 0
swap
store 0
swap
pop
swap
pop
+
retsub
multiplyByAdding_0_l2:
int 0
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_recursive_multiple_args_5():
    @pt.Subroutine(pt.TealType.uint64)
    def multiplyByAdding(a, b):
        return (
            pt.If(a == pt.Int(0))
            .Then(pt.Return(pt.Int(0)))
            .Else(pt.Return(b + multiplyByAdding(a - pt.Int(1), b)))
        )

    program = pt.Return(multiplyByAdding(pt.Int(3), pt.Int(5)))

    expected = """#pragma version 5
int 3
int 5
callsub multiplyByAdding_0
return

// multiplyByAdding
multiplyByAdding_0:
store 1
store 0
load 0
int 0
==
bnz multiplyByAdding_0_l2
load 1
load 0
int 1
-
load 1
load 0
load 1
uncover 3
uncover 3
callsub multiplyByAdding_0
cover 2
store 1
store 0
+
retsub
multiplyByAdding_0_l2:
int 0
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=5, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_mutually_recursive_4():
    @pt.Subroutine(pt.TealType.uint64)
    def isEven(i: pt.Expr) -> pt.Expr:
        return pt.If(i == pt.Int(0), pt.Int(1), pt.Not(isOdd(i - pt.Int(1))))

    @pt.Subroutine(pt.TealType.uint64)
    def isOdd(i: pt.Expr) -> pt.Expr:
        return pt.If(i == pt.Int(0), pt.Int(0), pt.Not(isEven(i - pt.Int(1))))

    program = pt.Return(isEven(pt.Int(6)))

    expected = """#pragma version 4
int 6
callsub isEven_0
return

// isEven
isEven_0:
store 0
load 0
int 0
==
bnz isEven_0_l2
load 0
int 1
-
load 0
dig 1
callsub isOdd_1
swap
store 0
swap
pop
!
b isEven_0_l3
isEven_0_l2:
int 1
isEven_0_l3:
retsub

// isOdd
isOdd_1:
store 1
load 1
int 0
==
bnz isOdd_1_l2
load 1
int 1
-
load 1
dig 1
callsub isEven_0
swap
store 1
swap
pop
!
b isOdd_1_l3
isOdd_1_l2:
int 0
isOdd_1_l3:
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_mutually_recursive_5():
    @pt.Subroutine(pt.TealType.uint64)
    def isEven(i: pt.Expr) -> pt.Expr:
        return pt.If(i == pt.Int(0), pt.Int(1), pt.Not(isOdd(i - pt.Int(1))))

    @pt.Subroutine(pt.TealType.uint64)
    def isOdd(i: pt.Expr) -> pt.Expr:
        return pt.If(i == pt.Int(0), pt.Int(0), pt.Not(isEven(i - pt.Int(1))))

    program = pt.Return(isEven(pt.Int(6)))

    expected = """#pragma version 5
int 6
callsub isEven_0
return

// isEven
isEven_0:
store 0
load 0
int 0
==
bnz isEven_0_l2
load 0
int 1
-
load 0
swap
callsub isOdd_1
swap
store 0
!
b isEven_0_l3
isEven_0_l2:
int 1
isEven_0_l3:
retsub

// isOdd
isOdd_1:
store 1
load 1
int 0
==
bnz isOdd_1_l2
load 1
int 1
-
load 1
swap
callsub isEven_0
swap
store 1
!
b isOdd_1_l3
isOdd_1_l2:
int 0
isOdd_1_l3:
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=5, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_mutually_recursive_different_arg_count_4():
    @pt.Subroutine(pt.TealType.uint64)
    def factorial(i: pt.Expr) -> pt.Expr:
        return pt.If(
            i <= pt.Int(1),
            pt.Int(1),
            factorial_intermediate(i - pt.Int(1), pt.Bytes("inconsequential")) * i,
        )

    @pt.Subroutine(pt.TealType.uint64)
    def factorial_intermediate(i: pt.Expr, j: pt.Expr) -> pt.Expr:
        return pt.Seq(pt.Pop(j), factorial(i))

    program = pt.Return(factorial(pt.Int(4)) == pt.Int(24))

    expected = """#pragma version 4
int 4
callsub factorial_0
int 24
==
return

// factorial
factorial_0:
store 0
load 0
int 1
<=
bnz factorial_0_l2
load 0
int 1
-
byte "inconsequential"
load 0
dig 2
dig 2
callsub factorialintermediate_1
swap
store 0
swap
pop
swap
pop
load 0
*
b factorial_0_l3
factorial_0_l2:
int 1
factorial_0_l3:
retsub

// factorial_intermediate
factorialintermediate_1:
store 2
store 1
load 2
pop
load 1
load 1
load 2
dig 2
callsub factorial_0
store 1
store 2
load 1
swap
store 1
swap
pop
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_mutually_recursive_different_arg_count_5():
    @pt.Subroutine(pt.TealType.uint64)
    def factorial(i: pt.Expr) -> pt.Expr:
        return pt.If(
            i <= pt.Int(1),
            pt.Int(1),
            factorial_intermediate(i - pt.Int(1), pt.Bytes("inconsequential")) * i,
        )

    @pt.Subroutine(pt.TealType.uint64)
    def factorial_intermediate(i: pt.Expr, j: pt.Expr) -> pt.Expr:
        return pt.Seq(pt.Log(j), factorial(i))

    program = pt.Return(factorial(pt.Int(4)) == pt.Int(24))

    expected = """#pragma version 5
int 4
callsub factorial_0
int 24
==
return

// factorial
factorial_0:
store 0
load 0
int 1
<=
bnz factorial_0_l2
load 0
int 1
-
byte "inconsequential"
load 0
cover 2
callsub factorialintermediate_1
swap
store 0
load 0
*
b factorial_0_l3
factorial_0_l2:
int 1
factorial_0_l3:
retsub

// factorial_intermediate
factorialintermediate_1:
store 2
store 1
load 2
log
load 1
load 1
load 2
uncover 2
callsub factorial_0
cover 2
store 2
store 1
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=5, assembleConstants=False
    )
    assert actual == expected


def test_compile_loop_in_subroutine():
    @pt.Subroutine(pt.TealType.none)
    def setState(value: pt.Expr) -> pt.Expr:
        i = pt.ScratchVar()
        return pt.For(
            i.store(pt.Int(0)), i.load() < pt.Int(10), i.store(i.load() + pt.Int(1))
        ).Do(pt.App.globalPut(pt.Itob(i.load()), value))

    program = pt.Seq([setState(pt.Bytes("value")), pt.Approve()])

    expected = """#pragma version 4
byte "value"
callsub setState_0
int 1
return

// setState
setState_0:
store 0
int 0
store 1
setState_0_l1:
load 1
int 10
<
bz setState_0_l3
load 1
itob
load 0
app_global_put
load 1
int 1
+
store 1
b setState_0_l1
setState_0_l3:
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_invalid_name():
    def tmp() -> pt.Expr:
        return pt.Int(1)

    tmp.__name__ = "invalid-;)"

    program = pt.Subroutine(pt.TealType.uint64)(tmp)()
    expected = """#pragma version 4
callsub invalid_0
return

// invalid-;)
invalid_0:
int 1
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=False
    )
    assert actual == expected


def test_compile_subroutine_assemble_constants():
    @pt.Subroutine(pt.TealType.none)
    def storeValue(key: pt.Expr, t1: pt.Expr, t2: pt.Expr, t3: pt.Expr) -> pt.Expr:
        return pt.App.globalPut(key, t1 + t2 + t3 + pt.Int(10))

    program = pt.Seq(
        [
            pt.If(pt.Txn.application_id() == pt.Int(0)).Then(
                storeValue(
                    pt.Concat(pt.Bytes("test"), pt.Bytes("test"), pt.Bytes("a")),
                    pt.Int(1),
                    pt.Int(1),
                    pt.Int(3),
                )
            ),
            pt.Approve(),
        ]
    )

    expected = """#pragma version 4
intcblock 1
bytecblock 0x74657374
txn ApplicationID
pushint 0 // 0
==
bz main_l2
bytec_0 // "test"
bytec_0 // "test"
concat
pushbytes 0x61 // "a"
concat
intc_0 // 1
intc_0 // 1
pushint 3 // 3
callsub storeValue_0
main_l2:
intc_0 // 1
return

// storeValue
storeValue_0:
store 3
store 2
store 1
store 0
load 0
load 1
load 2
+
load 3
+
pushint 10 // 10
+
app_global_put
retsub
""".strip()
    actual = pt.compileTeal(
        program, pt.Mode.Application, version=4, assembleConstants=True
    )
    assert actual == expected


def test_compile_wide_ratio():
    cases = (
        (
            pt.WideRatio([pt.Int(2), pt.Int(100)], [pt.Int(5)]),
            """#pragma version 5
int 2
int 100
mulw
int 0
int 5
divmodw
pop
pop
swap
!
assert
return
""",
        ),
        (
            pt.WideRatio([pt.Int(2), pt.Int(100)], [pt.Int(10), pt.Int(5)]),
            """#pragma version 5
int 2
int 100
mulw
int 10
int 5
mulw
divmodw
pop
pop
swap
!
assert
return
""",
        ),
        (
            pt.WideRatio([pt.Int(2), pt.Int(100), pt.Int(3)], [pt.Int(10), pt.Int(5)]),
            """#pragma version 5
int 2
int 100
mulw
int 3
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
int 10
int 5
mulw
divmodw
pop
pop
swap
!
assert
return
""",
        ),
        (
            pt.WideRatio(
                [pt.Int(2), pt.Int(100), pt.Int(3)], [pt.Int(10), pt.Int(5), pt.Int(6)]
            ),
            """#pragma version 5
int 2
int 100
mulw
int 3
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
int 10
int 5
mulw
int 6
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
divmodw
pop
pop
swap
!
assert
return
""",
        ),
        (
            pt.WideRatio(
                [pt.Int(2), pt.Int(100), pt.Int(3), pt.Int(4)],
                [pt.Int(10), pt.Int(5), pt.Int(6)],
            ),
            """#pragma version 5
int 2
int 100
mulw
int 3
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
int 4
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
int 10
int 5
mulw
int 6
uncover 2
dig 1
*
cover 2
mulw
cover 2
+
swap
divmodw
pop
pop
swap
!
assert
return
""",
        ),
    )

    for program, expected in cases:
        actual = pt.compileTeal(
            program, pt.Mode.Application, version=5, assembleConstants=False
        )
        assert actual == expected.strip()
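A minimal sketch (not part of the test suite) of the compileTeal round trip the assertions above rely on; the program below is a hypothetical example:

import pyteal as pt

expr = pt.Return(pt.Int(1) + pt.Int(2) == pt.Int(3))
teal = pt.compileTeal(expr, pt.Mode.Application, version=5)
# compileTeal returns line-oriented TEAL; the first line is the pragma
assert teal.splitlines()[0] == "#pragma version 5"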
17.515118
88
0.590207
4,510
31,860
4.072506
0.043016
0.047912
0.023085
0.020145
0.859694
0.834976
0.800457
0.760875
0.742473
0.72701
0
0.048096
0.282141
31,860
1,818
89
17.524752
0.754974
0.003453
0
0.73789
0
0
0.250848
0.00724
0
0
0.001164
0
0.029861
1
0.038487
false
0
0.001327
0.012608
0.079628
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
21a1bef5e1c6e77b10f18bef1f510edcf5c31243
5,334
py
Python
insights/tests/parsers/test_grubenv.py
TZ3070/insights-core
13f4fc6bfcb89d76f0255c6259902360a298d619
[ "Apache-2.0" ]
null
null
null
insights/tests/parsers/test_grubenv.py
TZ3070/insights-core
13f4fc6bfcb89d76f0255c6259902360a298d619
[ "Apache-2.0" ]
null
null
null
insights/tests/parsers/test_grubenv.py
TZ3070/insights-core
13f4fc6bfcb89d76f0255c6259902360a298d619
[ "Apache-2.0" ]
null
null
null
import doctest

from insights.parsers import grubenv
from insights.tests.parsers import skip_exception_check
from insights.tests import context_wrap


GRUBENV_WITH_TUNED_PARAMS = """
# GRUB Environment Block
saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64
kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200
boot_success=0
boot_indeterminate=2
tuned_params=transparent_hugepages=never
tuned_initrd=
###############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
""".strip()  # noqa

GRUBENV_WITHOUT_TUNED_PARAMS = """
# GRUB Environment Block
saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64
kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200
boot_success=0
boot_indeterminate=2
###############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
""".strip()  # noqa

GRUBENV_RHEL7 = """
# GRUB Environment Block
saved_entry=Red Hat Enterprise Linux Server (3.10.0-1127.el7.x86_64) 7.8 (Maipo)
###################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
""".strip()  # noqa


def test_doc_examples():
    env = {
        'grubenv': grubenv.GrubEnv(context_wrap(GRUBENV_WITH_TUNED_PARAMS))
    }
    failed, total = doctest.testmod(grubenv, globs=env)
    assert failed == 0


def test_with_tuned_params():
    results = grubenv.GrubEnv(context_wrap(GRUBENV_WITH_TUNED_PARAMS))

    assert results is not None
    assert results.has_kernelopts
    assert results.has_tuned_params
    assert results.kernelopts == "root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200"  # noqa
    assert results.tuned_params == "transparent_hugepages=never"
    assert results['saved_entry'] == "295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64"
    assert results['boot_success'] == "0"
    assert results['boot_indeterminate'] == "2"


def test_without_tuned_params():
    results = grubenv.GrubEnv(context_wrap(GRUBENV_WITHOUT_TUNED_PARAMS))

    assert results is not None
    assert results.has_kernelopts
    assert not results.has_tuned_params
    assert results.kernelopts == "root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200"  # noqa
    assert results.tuned_params == ""
    assert results['saved_entry'] == "295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64"
    assert results['boot_success'] == "0"
    assert results['boot_indeterminate'] == "2"


def test_r7():
    results = grubenv.GrubEnv(context_wrap(GRUBENV_RHEL7))

    assert results is not None
    assert not results.has_kernelopts
    assert not results.has_tuned_params
    assert results.kernelopts == ""
    assert results.tuned_params == ""
    assert results['saved_entry'] == "Red Hat Enterprise Linux Server (3.10.0-1127.el7.x86_64) 7.8 (Maipo)"


def test_skip():
    skip_exception_check(grubenv.GrubEnv, output_str="# test")
    skip_exception_check(grubenv.GrubEnv)
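A minimal sketch of the parser these tests exercise, using a hypothetical two-line grubenv blob; GrubEnv exposes dict-style key access plus the has_kernelopts/has_tuned_params flags asserted above:

from insights.parsers.grubenv import GrubEnv
from insights.tests import context_wrap

CONTENT = """
saved_entry=my-entry
boot_success=1
""".strip()  # hypothetical input, not one of the fixtures above

env = GrubEnv(context_wrap(CONTENT))
assert env['saved_entry'] == 'my-entry'
assert not env.has_kernelopts  # no kernelopts= line in this blob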
66.675
918
0.447132
460
5,334
4.952174
0.18913
0.108428
0.05619
0.052678
0.859526
0.795874
0.756804
0.756804
0.674276
0.674276
0
0.041566
0.070866
5,334
79
919
67.518987
0.418079
0.004499
0
0.515625
0
0.09375
0.694193
0.57862
0
0
0
0
0.359375
1
0.078125
false
0
0.0625
0
0.140625
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
21b517ff259c961a68e2589d024c3bf99c2ee885
52,922
py
Python
prody/tests/sequence/test_analysis.py
grandevelia/ProDy
7c725640a94c16543423c0756388998cb86a97ae
[ "MIT" ]
210
2015-01-26T08:17:56.000Z
2022-03-30T01:40:34.000Z
prody/tests/sequence/test_analysis.py
grandevelia/ProDy
7c725640a94c16543423c0756388998cb86a97ae
[ "MIT" ]
555
2015-01-05T21:51:54.000Z
2022-03-31T16:51:41.000Z
prody/tests/sequence/test_analysis.py
grandevelia/ProDy
7c725640a94c16543423c0756388998cb86a97ae
[ "MIT" ]
99
2015-02-09T18:00:39.000Z
2022-03-07T12:52:51.000Z
__author__ = 'Ahmet Bakan, Anindita Dutta, Wenzhi Mao'

from prody.tests import TestCase

from numpy import array, log, zeros, char, ones, fromfile
from numpy.testing import assert_array_equal, assert_array_almost_equal

from prody.tests.datafiles import *

from prody import LOGGER, calcShannonEntropy, buildMutinfoMatrix, parseMSA
from prody import calcMSAOccupancy, buildSeqidMatrix, uniqueSequences
from prody import buildOMESMatrix, buildSCAMatrix, calcMeff
from prody import buildDirectInfoMatrix

LOGGER.verbosity = None

FASTA = parseMSA(pathDatafile('msa_Cys_knot.fasta'))
FASTA_ALPHA = char.isalpha(FASTA._msa)
FASTA_UPPER = char.upper(FASTA._msa)

FASTA_NUMBER, FASTA_LENGTH = FASTA_ALPHA.shape
FASTA_EYE = zeros((FASTA_NUMBER, FASTA_NUMBER))
for i in range(FASTA_NUMBER):
    FASTA_EYE[i, i] = 1
    for j in range(i + 1, FASTA_NUMBER):
        score = 0.0
        ncols = 0
        for k in range(FASTA_LENGTH):
            if FASTA_ALPHA[i, k] or FASTA_ALPHA[j, k]:
                if FASTA_UPPER[i, k] == FASTA_UPPER[j, k]:
                    score += 1
                ncols += 1
        FASTA_EYE[i, j] = FASTA_EYE[j, i] = score / ncols


class TestCalcShannonEntropy(TestCase):

    def testSixSequences(self):
        msa = array([list('AAAAaaaaAAAAaaaa'),
                     list('AAACaaacAAACaaac'),
                     list('AACDaacdAACDaacd'),
                     list('ACCEacceacceACCE'),
                     list('ACDFacdfacdfACDF'),
                     list('ACDGacdgacdgACDG')], dtype='|S1')

        expect = -log(1. / array([1, 2, 3, 6] * 4))
        result = calcShannonEntropy(msa)
        assert_array_almost_equal(expect, result)

    def testTwenty(self):
        msa = array([[char] for char in 'ACDEFGHIKLMNPQRSTVWY'], dtype='|S1')

        expect = -log(1. / 20)
        result = calcShannonEntropy(msa)
        assert_array_almost_equal(expect, result)

    def testSmallProbability(self):
        msa = zeros((1000000, 1), '|S1')
        msa[0] = 'A'
        msa[1:] = 'C'

        expect = array([1., 999999.]) / 1000000
        expect = - (expect * log(expect)).sum()
        result = calcShannonEntropy(msa)
        assert_array_almost_equal(expect, result)

    def testAmbiguous(self):
        msa = array([list('bjzxBJZX'),
                     list('bjzxBJZX'), ], dtype='|S1')

        expect = -log(1. / array([2, 2, 2, 20] * 2))
        result = calcShannonEntropy(msa)
        assert_array_almost_equal(expect, result)

    def testGapDividend(self):
        msa = array([list('AAAA'),
                     list('AAAC'),
                     list('AACD'),
                     list('ACCE'),
                     list('ACDF'),
                     list('ACDG'),
                     list('----')], dtype='|S1')

        expect = -log(1. / array([1, 2, 3, 6]))
        result = calcShannonEntropy(msa, omitgaps=True)
        assert_array_almost_equal(expect, result)

    """
    def testSixSequences3(self):
        msa = array([list('AAAA'),
                     list('AAAB'),
                     list('AABC'),
                     list('ABBD'),
                     list('ABCE'),
                     list('ABCF')])

        expect = -log(1. / array([1, 2, 3, 6]))
        result = calcInfoEntropy(msa)
        assert_array_almost_equal(expect, result)
    """


class TestCalcMutualInfo(TestCase):

    def testSixSequences(self):
        msa = array([list('ACCA'),
                     list('ACDA'),
                     list('ACEC'),
                     list('ACGC')], dtype='|S1')

        expect = array([[0., 0., 0., 0.],
                        [0., 0., 0., 0.],
                        [0., 0., 0., log(2.)],
                        [0., 0., log(2.), 0.], ])
        result = buildMutinfoMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testTwenty(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, s] for s in seq], dtype='|S1')

        expect = log(20.)
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testTwentyReversed(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, seq[-i-1]] for i, s in enumerate(seq)], dtype='|S1')

        expect = log(20.)
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity(self):
        msa = array([list('OX'), list('XO')], dtype='|S1')

        expect = array([[0., log(2.)], [log(2.), 0.]])
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testNoAmbiguity(self):
        msa = array([list('OX'), list('XO')], dtype='|S1')

        expect = array([[0., log(2.)], [log(2.), 0.]])
        result = buildMutinfoMatrix(msa, ambiquity=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, ambiquity=False, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity2(self):
        msa = array([list('AB'), list('BZ')], dtype='|S1')

        expect = (2 * .25 * log(.25 / .5 / .25) +
                  4 * .125 * log(.125 / .25 / .25))
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity3(self):
        msa = array([list('XX')], dtype='|S1')

        expect = zeros((2, 2))
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity4(self):
        msa = array([list('Bb'), list('jJ'), list('Zz'), ], dtype='|S1')

        expect = log((1./12) / (1./6) / (1./6))
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity5(self):
        expect = array([[0., 0.], [0., 0.]])
        for seq in ['bx', 'Xb', 'jX', 'Xj', 'xz', 'ZX',
                    'bj', 'jb', 'bz', 'zb', 'jz', 'zj']:
            msa = array([list(seq)], dtype='|S1')
            result = buildMutinfoMatrix(msa, debug=False)
            assert_array_almost_equal(expect, result, err_msg=seq + ' failed')

    def testAmbiguity6(self):
        expect = zeros((2, 2))
        for seq in ['bb', 'jj', 'zz']:
            msa = array([list(seq)], dtype='|S1')
            result = buildMutinfoMatrix(msa, debug=False)
            assert_array_almost_equal(expect, result, err_msg=seq + ' failed')

    def testAmbiguity7(self):
        msa = array([list('bx'), list('xb')], dtype='|S1')

        expect = (72 * 0.0125 * log(0.0125/0.0250/0.275) +
                  4 * 0.0250 * log(0.0250/0.275/0.275))
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testInf(self):
        msa = zeros((500, 10), '|S1')
        msa.fill('.')
        msa[95, 8] = 's'
        msa[95, 9] = 'i'

        expect = zeros((10, 10))
        expect[8, 9] = expect[9, 8] = 0.002 * log(500.) + .998 * log(1. / .998)
        result = buildMutinfoMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildMutinfoMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testNorm(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, seq[-i-1]] for i, s in enumerate(seq)], dtype='|S1')

        expect = 1.
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, norm=True)
        assert_array_almost_equal(expect, result, err_msg='norm failed')

    def testNorm2(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, 'O' if i % 2 else 'U'] for i, s in enumerate(seq)],
                    dtype='|S1')

        expect = log(1./20. / (1./20. * 1./2.)) / (-log(1./20.))
        expect = array([[0., expect], [expect, 0.]])
        result = buildMutinfoMatrix(msa, norm=True)
        assert_array_almost_equal(expect, result, err_msg='norm failed')


class TestCalcMSAOccupancy(TestCase):

    def testResidueCount(self):
        assert_array_equal(calcMSAOccupancy(FASTA, 'residue', count=1),
                           FASTA_ALPHA.sum(0))

    def testSequenceCount(self):
        assert_array_equal(calcMSAOccupancy(FASTA, 'sequence', count=1),
                           FASTA_ALPHA.sum(1))

    def testResidueOccupancy(self):
        assert_array_equal(calcMSAOccupancy(FASTA, 'residue'),
                           FASTA_ALPHA.sum(0) / (FASTA.numSequences() * 1.0))

    def testSequenceOccupancy(self):
        assert_array_equal(calcMSAOccupancy(FASTA, 'sequence'),
                           FASTA_ALPHA.sum(1) / (FASTA.numResidues() * 1.0))


class TestIdentity(TestCase):

    def testIdentityMatrix(self):
        assert_array_almost_equal(FASTA_EYE, buildSeqidMatrix(FASTA))

    def testIdentityMatrixNonTurbo(self):
        assert_array_almost_equal(FASTA_EYE,
                                  buildSeqidMatrix(FASTA, turbo=False))


class TestUnique(TestCase):

    def testUnique(self):
        seqid = 0.98
        unique = ones(FASTA_NUMBER, bool)
        for i in range(FASTA_NUMBER):
            if not unique[i]:
                continue
            for j in range(i+1, FASTA_NUMBER):
                if FASTA_EYE[i, j] >= seqid:
                    unique[j] = False
        assert_array_equal(unique, uniqueSequences(FASTA, seqid))

    def testUnique2(self):
        seqid = 0.5
        unique = ones(FASTA_NUMBER, bool)
        for i in range(FASTA_NUMBER):
            if not unique[i]:
                continue
            for j in range(i+1, FASTA_NUMBER):
                if FASTA_EYE[i, j] >= seqid:
                    unique[j] = False
        assert_array_equal(unique, uniqueSequences(FASTA, seqid))


class TestCalcOMES(TestCase):

    def testZero(self):
        msa = array([list('ACCA'),
                     list('ACDA'),
                     list('ACCC'),
                     list('ACDC')], dtype='|S1')

        expect = array([[0., 0., 0., 0.],
                        [0., 0., 0., 0.],
                        [0., 0., 0., 0.],
                        [0., 0., 0., 0.], ])
        result = buildOMESMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testFourSequences(self):
        msa = array([list('ACCA'),
                     list('ACDA'),
                     list('ACDC'),
                     list('ACDC')], dtype='|S1')

        expect = array([[0., 0., 0., 0.],
                        [0., 0., 0., 0.],
                        [0., 0., 0., 4./3],
                        [0., 0., 4./3, 0.], ])
        result = buildOMESMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testTwenty(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, s] for s in seq], dtype='|S1')

        expect = array([[0., 380.], [380., 0.]])
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testTwentyReversed(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, seq[-i-1]] for i, s in enumerate(seq)], dtype='|S1')

        expect = array([[0., 380.], [380., 0.]])
        result = buildOMESMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity(self):
        msa = array([list('OX'), list('XO')], dtype='|S1')

        expect = array([[0., 2.], [2., 0.]])
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testNoAmbiguity(self):
        msa = array([list('OX'), list('XO')], dtype='|S1')

        expect = array([[0., 2.], [2., 0.]])
        result = buildOMESMatrix(msa, ambiquity=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, ambiquity=False, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity2(self):
        msa = array([list('AB'), list('BZ')], dtype='|S1')

        expect = array([[0., 2.], [2., 0.]])
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity3(self):
        msa = array([list('XX')], dtype='|S1')

        expect = zeros((2, 2))
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity4(self):
        msa = array([list('Bb'), list('jJ'), list('Zz'), ], dtype='|S1')

        expect = array([[0., 6.], [6., 0.]])
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testAmbiguity5(self):
        expect = array([[0., 0.], [0., 0.]])
        for seq in ['bx', 'Xb', 'jX', 'Xj', 'xz', 'ZX',
                    'bj', 'jb', 'bz', 'zb', 'jz', 'zj']:
            msa = array([list(seq)], dtype='|S1')
            result = buildOMESMatrix(msa, debug=False)
            assert_array_almost_equal(expect, result, err_msg=seq + ' failed')

    def testAmbiguity6(self):
        expect = zeros((2, 2))
        for seq in ['bb', 'jj', 'zz']:
            msa = array([list(seq)], dtype='|S1')
            result = buildOMESMatrix(msa, debug=False)
            assert_array_almost_equal(expect, result, err_msg=seq + ' failed')

    def testAmbiguity7(self):
        msa = array([list('bx'), list('xb')], dtype='|S1')

        expect = array([[0., 162./121], [162./121, 0.]])
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testInf(self):
        msa = zeros((500, 10), '|S1')
        msa.fill('.')
        msa[95, 8] = 's'
        msa[95, 9] = 'i'

        expect = zeros((10, 10))
        expect[8, 9] = expect[9, 8] = 500.
        result = buildOMESMatrix(msa, debug=False)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildOMESMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')


class TestCalcSCA(TestCase):

    def testZero(self):
        msa = array([list('ACCD'),
                     list('ACDD'),
                     list('ACCC'),
                     list('ACDC')], dtype='|S1')

        expect = array([log(0.975/.025)*.5, log(0.95/.05)*.5])
        weight = ((expect ** 2).sum())**.5
        expect = expect / weight * array([log(0.975/.025), log(0.95/.05)])
        expect = (expect ** 2).mean() - (expect.mean()) ** 2
        expect = array([[0., 0., 0., 0.],
                        [0., 0., 0., 0.],
                        [0., 0., expect, 0.],
                        [0., 0., 0., expect], ])
        result = buildSCAMatrix(msa)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildSCAMatrix(msa, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')

    def testMATLAB(self):
        sca = fromfile(pathDatafile('msa_Cys_knot_sca.dat'))
        expect = sca.reshape((10, 10))
        fasta = FASTA[:, :10]
        result = buildSCAMatrix(fasta, turbo=True)
        assert_array_almost_equal(expect, result, err_msg='turbo failed')
        result = buildSCAMatrix(fasta, turbo=False)
        assert_array_almost_equal(expect, result, err_msg='w/out turbo failed')


class TestCalcMeff(TestCase):

    def testZero1(self):
        msa = array([list('ACCD')] * 100, dtype='|S1')

        expect = 1.
        result = calcMeff(msa)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.9)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.4)
        assert_array_almost_equal(expect, result)

        expect = (1., zeros((100)) + 1./100)
        result = calcMeff(msa, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.9, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.4, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

    def testZero2(self):
        msa = array([list('AACC')] * 50 + [list('AADD')] * 50, dtype='|S1')

        expect = 2.
        result = calcMeff(msa)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.9)
        assert_array_almost_equal(expect, result)

        expect = 1.
        result = calcMeff(msa, seqid=0.4)
        assert_array_almost_equal(expect, result)

        expect = (2., zeros((100)) + 1./50)
        result = calcMeff(msa, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.9, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

        expect = (1., zeros((100)) + 1./100)
        result = calcMeff(msa, seqid=0.4, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

    def testTwenty(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, s] for s in seq], dtype='|S1')

        expect = 20.
        result = calcMeff(msa)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.9)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.4)
        assert_array_almost_equal(expect, result)

        expect = (20., ones(20))
        result = calcMeff(msa, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.9, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.4, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

    def testTwentyReversed(self):
        seq = 'ACDEFGHIKLMNPQRSTVWY'
        msa = array([[s, seq[-i-1]] for i, s in enumerate(seq)], dtype='|S1')

        expect = 20.
        result = calcMeff(msa)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.9)
        assert_array_almost_equal(expect, result)
        result = calcMeff(msa, seqid=0.4)
        assert_array_almost_equal(expect, result)

        expect = (20., ones(20))
        result = calcMeff(msa, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.9, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')
        result = calcMeff(msa, seqid=0.4, weight=True)
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

    def testMATLAB(self):
        expect = 1.8416666666666664e+01
        result = calcMeff(FASTA, refine=True)
        assert_array_almost_equal(expect, result)
        result = calcMeff(FASTA, refine=True, weight=True)
        expect = (expect,
                  array([1./3, 1./3, 1./4, 1./2, 1., 1., 1., 1., 1./3, 1./3,
                         1./3, 1./2, 1./2, 1., 1., 1., 1., 1., 1., 1., 1.,
                         1., 1., 1./2, 1./2], dtype='float'))
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

        expect = 23
        result = calcMeff(FASTA, seqid=0.9, refine=True)
        assert_array_almost_equal(expect, result)
        result = calcMeff(FASTA, seqid=0.9, refine=True, weight=True)
        expect = (expect,
                  array([0.5, 0.5, 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.5,
                         0.5, 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                         1.], dtype='float'))
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')

        expect = 4.66689144189144
        result = calcMeff(FASTA, seqid=0.4, refine=True)
        assert_array_almost_equal(expect, result)
        result = calcMeff(FASTA, seqid=0.4, refine=True, weight=True)
        expect = (expect,
                  array([0.071428571, 0.071428571, 0.066666667, 0.083333333,
                         0.076923077, 0.066666667, 0.076923077, 0.090909091,
                         0.090909091, 0.071428571, 0.076923077, 0.076923077,
                         0.1, 0.2, 0.166666667, 0.25, 1., 0.25, 0.25, 0.25,
                         0.25, 0.333333333, 0.5, 0.071428571, 0.125, ],
                        dtype='float'))
        assert_array_almost_equal(expect[0], result[0], err_msg='weight failed')
        assert_array_almost_equal(expect[1], result[1], err_msg='weight failed')


class TestDirectInfo(TestCase):

    def testZero(self):
        msa = array([list('ACCY')] * 100, dtype='|S1')

        expect = array([[0., 0.66325166608, 0.66325166608, 0.66222154839],
                        [0.66325166608, 0., 0.66325166608, 0.66222154839],
                        [0.66325166608, 0.66325166608, 0., 0.66222154839],
                        [0.66222154839, 0.66222154839, 0.66222154839, 0.], ])
        result = buildDirectInfoMatrix(msa)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

        # pseudo_weight=0.3
        expect = array([[0., 0.53477644, 0.53477644, 0.58513139],
                        [0.53477644, 0., 0.53477644, 0.58513139],
                        [0.53477644, 0.53477644, 0., 0.58513139],
                        [0.58513139, 0.58513139, 0.58513139, 0.]])
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.9, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.4, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

        # pseudo_weight=0.8
        expect = array([[0., 0.41397564, 0.41397564, 0.41295744],
                        [0.41397564, 0., 0.41397564, 0.41295744],
                        [0.41397564, 0.41397564, 0., 0.41295744],
                        [0.41295744, 0.41295744, 0.41295744, 0.]])
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.9, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.4, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

    def testZero1(self):
        msa = array([list('ACCD')] * 100, dtype='|S1')

        expect = array([[0., 0.13010138, 0.13010138, 0.13008827],
                        [0.13010138, 0., 0.13010138, 0.13008827],
                        [0.13010138, 0.13010138, 0., 0.13008827],
                        [0.13008827, 0.13008827, 0.13008827, 0.]])
        result = buildDirectInfoMatrix(msa)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

        # pseudo_weight=0.3
        expect = array([[0., 0.23225802, 0.23225802, 0.23225606],
                        [0.23225802, 0., 0.23225802, 0.23225606],
                        [0.23225802, 0.23225802, 0., 0.23225606],
                        [0.23225606, 0.23225606, 0.23225606, 0.]])
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.9, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.4, pseudo_weight=0.3, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

        # pseudo_weight=0.8
        expect = array([[0., 0.03910082, 0.03910082, 0.03910178],
                        [0.03910082, 0., 0.03910082, 0.03910178],
                        [0.03910082, 0.03910082, 0., 0.03910178],
                        [0.03910178, 0.03910178, 0.03910178, 0.]])
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.9, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(
            msa, seqid=0.4, pseudo_weight=0.8, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

    def testZero2(self):
        msa = array([list('AAYY')] * 50 + [list('YYDD')] * 50, dtype='|S1')

        expect = array([[0., 1.0248086877, 1.0001784999, 1.0001784999],
                        [1.0248086877, 0., 1.0001784999, 1.0001784999],
                        [1.0001784999, 1.0001784999, 0., 1.0248086877],
                        [1.0001784999, 1.0001784999, 1.0248086877, 0.], ])
        result = buildDirectInfoMatrix(msa)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4)
        assert_array_almost_equal(
            expect, result, err_msg='w/out refine failed')
        result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True)
        assert_array_almost_equal(expect, result, err_msg='refine failed')

        # pseudo_weight=0.3
        expect = array([[0., 1.30637709, 1.00041168, 1.00041168],
                        [1.30637709, 0., 1.00041168,
1.00041168], [1.00041168, 1.00041168, 0., 1.30637709], [1.00041168, 1.00041168, 1.30637709, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.8 expect = array([[0., 0.55686383, 0.55605426, 0.55605426], [0.55686383, 0., 0.55605426, 0.55605426], [0.55605426, 0.55605426, 0., 0.55686383], [0.55605426, 0.55605426, 0.55686383, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testZero3(self): msa = array([list('AACC')] * 50 + [list('CCDD')] * 50, dtype='|S1') expect = array([[0., 0.23179074, 0.23178758, 0.23178758], [0.23179074, 0., 0.23178758, 0.23178758], [0.23178758, 0.23178758, 0., 0.23178758], [0.23178758, 0.23178758, 0.23178758, 0.]]) result = buildDirectInfoMatrix(msa) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.3 expect = array([[0., 0.54966352, 0.54965557, 0.54965557], [0.54966352, 0., 0.54965557, 0.54965557], [0.54965557, 0.54965557, 0., 0.54965565], [0.54965557, 0.54965557, 0.54965565, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3) assert_array_almost_equal( expect, result, 
err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.8 expect = array([[0., 0.04349673, 0.0434971, 0.0434971], [0.04349673, 0., 0.0434971, 0.0434971], [0.0434971, 0.0434971, 0., 0.04349711], [0.0434971, 0.0434971, 0.04349711, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testZero4(self): msa = array([list('AAYY')] * 50 + [list('AADD')] * 50, dtype='|S1') expect = array([[0., 0.68512031, 0.54656421, 0.54656421], [0.68512031, 0., 0.54656421, 0.54656421], [0.54656421, 0.54656421, 0., 1.05788193], [0.54656421, 0.54656421, 1.05788193, 0.]]) result = buildDirectInfoMatrix(msa) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.3 expect = array([[0., 0.65929972, 0.52603587, 0.52603587], [0.65929972, 0., 0.52603587, 0.52603587], [0.52603587, 0.52603587, 0., 1.99332504], [0.52603587, 0.52603587, 1.99332504, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # 
pseudo_weight=0.8 expect = array([[0., 0.48628027, 0.27348214, 0.27348214], [0.48628027, 0., 0.27348214, 0.27348214], [0.27348214, 0.27348214, 0., 0.65235465], [0.27348214, 0.27348214, 0.65235465, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testTwenty(self): seq = 'ACDEFGHIKLMNPQRSTVWY' msa = array([[s, s] for s in seq], dtype='|S1') expect = array([[0., 3.0302471958885744], [3.0302471958885744, 0.]]) result = buildDirectInfoMatrix(msa) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.3 expect = array([[0., 3.0757238], [3.0757238, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.8 expect = array([[0., 1.89488914], [1.89488914, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8) 
assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testTwentyReversed(self): seq = 'ACDEFGHIKLMNPQRSTVWY' msa = array([[s, seq[-i-1]] for i, s in enumerate(seq)], dtype='|S1') expect = array([[0., 3.030670764986982], [3.030670764986982, 0.]]) result = buildDirectInfoMatrix(msa) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.3 expect = array([[0., 3.12974624], [3.12974624, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.3) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.3, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') # pseudo_weight=0.8 expect = array([[0., 1.89488914], [1.89488914, 0.]]) result = buildDirectInfoMatrix(msa, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(msa, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.9, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.9, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') result = buildDirectInfoMatrix(msa, seqid=0.4, pseudo_weight=0.8) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix( msa, seqid=0.4, pseudo_weight=0.8, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testMATLAB8(self): di = fromfile(pathDatafile('msa_Cys_knot_di.dat')) expect = di.reshape((8, 8)) fasta = FASTA[:, :8] result = buildDirectInfoMatrix(fasta) assert_array_almost_equal( expect, result, err_msg='w/out refine failed') result = buildDirectInfoMatrix(fasta, refine=True) assert_array_almost_equal(expect, result, err_msg='refine failed') def testMATLAB10(self): di = fromfile(pathDatafile('msa_Cys_knot_di.dat')) expect = di.reshape((8, 8)) fasta = FASTA[:, :10] result = buildDirectInfoMatrix(fasta, refine=True) assert_array_almost_equal(expect, result, err_msg='refine 
failed')
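The calcMeff expectations above encode a simple definition of the effective number of sequences: each row of the alignment is down-weighted by the number of rows (itself included) whose fractional identity to it reaches the seqid threshold, and Meff is the sum of those weights. The sketch below only illustrates that definition in plain NumPy; calc_meff_reference is a made-up name, the default seqid of 0.8 is an assumption, and ProDy's actual calcMeff (which these tests call, and which also supports refine) is implemented separately.

# Reference illustration of the Meff definition exercised by the tests
# above -- an assumption-labeled sketch, not ProDy's implementation.
import numpy as np

def calc_meff_reference(msa, seqid=0.8):  # hypothetical helper; 0.8 default assumed
    """msa: 2-D array of single characters, one sequence per row."""
    # fractional identity between every pair of rows
    identity = (msa[:, None, :] == msa[None, :, :]).mean(axis=2)
    # for each sequence, count how many sequences meet the identity threshold
    similar = (identity >= seqid).sum(axis=1)
    weights = 1.0 / similar
    return weights.sum(), weights

# 20 mutually distinct two-column sequences: every weight is 1 and
# Meff == 20, matching the expectations asserted in the tests above.
seq = 'ACDEFGHIKLMNPQRSTVWY'
msa = np.array([[s, s] for s in seq], dtype='|S1')
meff, weights = calc_meff_reference(msa, seqid=0.9)
assert meff == 20.0 and (weights == 1.0).all()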
44.138449
79
0.588904
6,231
52,922
4.82892
0.046221
0.088471
0.132773
0.171824
0.909768
0.895078
0.886636
0.836452
0.827146
0.821297
0
0.083569
0.285496
52,922
1,198
80
44.175292
0.712162
0.004743
0
0.767821
0
0
0.07785
0
0
0
0
0
0.244399
1
0.057026
false
0
0.008147
0
0.074338
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
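The numeric fields recorded after each file's content appear to be simple per-file statistics; the leading values above (44.138449, 79, 0.588904) are consistent with a mean line length, a maximum line length, and an alphanumeric-character fraction. The pipeline's exact definitions are not given in this dump, so the sketch below is an approximation under those assumptions, not the dataset's own code.

# Hedged sketch: plausible derivations of the leading per-file statistics.
# Whether newlines count toward the character total, and similar details,
# are assumptions here.
def basic_stats(content: str) -> dict:
    lines = content.splitlines()
    lengths = [len(line) for line in lines] or [0]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        'avg_line_length': sum(lengths) / len(lengths),
        'max_line_length': max(lengths),
        'alphanum_fraction': alnum / max(len(content), 1),
    }

print(basic_stats("x = 1\nprint(x)\n"))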
21ce883732fe6d5c6de161b595f75c86e587257a
48,733
py
Python
snooty/test_openapi.py
nlarew/snooty-parser
b05da93853fea447d01fd933697219048a35a955
[ "Apache-2.0", "CNRI-Python-GPL-Compatible" ]
null
null
null
snooty/test_openapi.py
nlarew/snooty-parser
b05da93853fea447d01fd933697219048a35a955
[ "Apache-2.0", "CNRI-Python-GPL-Compatible" ]
null
null
null
snooty/test_openapi.py
nlarew/snooty-parser
b05da93853fea447d01fd933697219048a35a955
[ "Apache-2.0", "CNRI-Python-GPL-Compatible" ]
null
null
null
from pathlib import Path

from . import rstparser
from .util_test import check_ast_testing_string
from .types import ProjectConfig
from .parser import parse_rst, JSONVisitor

ROOT_PATH = Path("test_data")


def test_openapi() -> None:
    path = ROOT_PATH.joinpath(Path("test.rst"))
    project_config = ProjectConfig(ROOT_PATH, "", default_domain="mongodb", source="./")
    parser = rstparser.Parser(project_config, JSONVisitor)

    # Test a simple literally-included code block
    page, diagnostics = parse_rst(
        parser,
        path,
        """
.. openapi:: /test_parser/openapi-admin-v3.yaml
""",
    )
    page.finish(diagnostics)
    assert diagnostics == []
    check_ast_testing_string(
        page.ast,
        """
<root><directive domain="mongodb" name="openapi"><text>/test_parser/openapi-admin-v3.yaml</text><section><heading id="base-url"><text>Base URL</text></heading><code lang="none" copyable="True">https://realm.mongodb.com/api/admin/v3.0</code><paragraph><text>The root API resource and starting point for the Realm API.</text></paragraph></section><section><heading id="api-key-apis"><text>API Key APIs</text></heading><directive domain="mongodb" name="operation" hash="get-/groups/{groupid}/apps/{appid}/api_keys" method="get" path="/groups/{groupId}/apps/{appId}/api_keys"><paragraph><text>List </text><ref_role domain="std" name="doc" fileid="['/authentication/api-key', '']"><text>API keys</text></ref_role><text> associated with a Realm app.</text></paragraph><section><heading id="path-parameters"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem></list></directive></section><section><heading id="header-parameters"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list
enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="responses"><text>Responses</text></heading><paragraph><literal><text>200</text></literal><text>: The API keys were successfully listed.</text></paragraph><code lang="json" copyable="True">[ { "_id": "string", "name": "string", "disabled": "boolean" } ]</code><directive name="list-table" header-rows="1" widths="35 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>[]</text></literal></paragraph></listItem><listItem><paragraph><text>array of objects</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>[]._id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>[].name</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>[].disabled</text></literal></paragraph></listItem><listItem><paragraph><text>boolean</text></paragraph></listItem><listItem></listItem></list></listItem></list></directive></section></directive><directive domain="mongodb" name="operation" hash="post-/groups/{groupid}/apps/{appid}/api_keys" method="post" path="/groups/{groupId}/apps/{appId}/api_keys"><paragraph><text>Create a new </text><ref_role domain="std" name="doc" fileid="['/authentication/api-key', '']"><text>API key</text></ref_role><text>.</text></paragraph><section><heading id="id1"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group 
ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem></list></directive></section><section><heading id="id2"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="request-body"><text>Request Body </text><role domain="mongodb" name="required" target="True"></role></heading><paragraph><text>The API key to create.</text></paragraph><code lang="json" copyable="True">{ "name": "string" }</code><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>name</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem></listItem></list></listItem></list></directive></section><section><heading id="id3"><text>Responses</text></heading><paragraph><literal><text>201</text></literal><text>: The API key was successfully created.</text></paragraph><code lang="json" copyable="True">{ "_id": "string", "key": "string", "name": "string", "disabled": "string" }</code><directive name="list-table" header-rows="1" widths="35 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list 
enumtype="unordered"><listItem><paragraph><literal><text>_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>key</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>name</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>disabled</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem></list></directive></section></directive></section><section><heading id="billing-apis"><text>Billing APIs</text></heading><directive domain="mongodb" name="operation" hash="get-/groups/{groupid}/apps/{appid}/measurements/" method="get" path="/groups/{groupId}/apps/{appId}/measurements/"><paragraph><text>List the request, compute, sync, data transfer, and memory usage of a specific app in a given period for </text><ref_role domain="std" name="doc" fileid="['/billing', '']"><text>billing</text></ref_role><text> purposes.</text></paragraph><section><heading id="id4"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem></list></directive></section><section><heading id="query-parameters"><text>Query Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list 
enumtype="unordered"><listItem><paragraph><literal><text>start</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The ISO 8601 date and time of the start of the query period. Default is 00:00:00 UTC on the first day of the current month.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>end</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The ISO 8601 date and time of the end of the query period. Default is 23:59:59 UTC on the the last day of the current month.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>granularity</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>Specifies the granularity of the query period, either P31D (31 day) or PT1H (1 hour). Default is P31D.</text></paragraph><paragraph><text>Possible Values:</text></paragraph><list enumtype="unordered"><listItem><paragraph><literal><text>P31D</text></literal></paragraph></listItem><listItem><paragraph><literal><text>PT1H</text></literal></paragraph></listItem></list></listItem></list></listItem></list></directive></section><section><heading id="id5"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="id6"><text>Responses</text></heading><paragraph><literal><text>200</text></literal><text>: The measurements were successfully returned.</text></paragraph><code lang="json" copyable="True">{ "start": "string", "end": "string", "granularity": "string", "group_id": "string", "appId": "string", "appName": "string", "measurements": [ { "name": "string", "units": "string", "data_points": [ { "timestamp": "string", "value": "number" } ] } ] }</code><directive name="list-table" header-rows="1" widths="35 15 50"><list enumtype="unordered"><listItem><list 
enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>start</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The RFC 3339 date and time of the start of the query period, which can be specified with the </text><literal><text>start</text></literal><text> query parameter.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>end</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The RFC 3339 date and time of the end of the query period, which can be specified with the </text><literal><text>end</text></literal><text> query parameter.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>granularity</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The granularity, which can be specified with the </text><literal><text>granularity</text></literal><text> query parameter.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>group_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The Realm app ID specified by the </text><literal><text>appId</text></literal><text> path parameter.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appName</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The name of the Realm app specified by the </text><literal><text>appId</text></literal><text> path parameter.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements</text></literal></paragraph></listItem><listItem><paragraph><text>array</text></paragraph></listItem><listItem><paragraph><text>The array of measurements.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements.[].name</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The usage metric represented by each data point. 
See </text><ref_role domain="std" name="doc" fileid="['/billing', '']"><text>billing</text></ref_role><text>.</text></paragraph><paragraph><text>Possible Values:</text></paragraph><list enumtype="unordered"><listItem><paragraph><literal><text>request_count</text></literal></paragraph></listItem><listItem><paragraph><literal><text>compute_time</text></literal></paragraph></listItem><listItem><paragraph><literal><text>data_out</text></literal></paragraph></listItem><listItem><paragraph><literal><text>sync_time</text></literal></paragraph></listItem><listItem><paragraph><literal><text>mem_usage</text></literal></paragraph></listItem></list></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements.[].units</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The unit of the </text><literal><text>value</text></literal><text> of each data point.</text></paragraph><paragraph><text>Possible Values:</text></paragraph><list enumtype="unordered"><listItem><paragraph><literal><text>&lt;empty string&gt;</text></literal></paragraph></listItem><listItem><paragraph><literal><text>HOURS</text></literal></paragraph></listItem><listItem><paragraph><literal><text>GIGABYTES</text></literal></paragraph></listItem><listItem><paragraph><literal><text>GIGABYTE_SECONDS</text></literal></paragraph></listItem></list></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements.[].data_points</text></literal></paragraph></listItem><listItem><paragraph><text>array</text></paragraph></listItem><listItem><paragraph><text>The array of data points for this measurement. A finer </text><literal><text>granularity</text></literal><text> results in more data points.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements.[].data_points.[].timestamp</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The ISO 8601 date and time of the data point.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>measurements.[].data_points.[].value</text></literal></paragraph></listItem><listItem><paragraph><text>number</text></paragraph></listItem><listItem><paragraph><text>The value at the time in the </text><literal><text>unit</text></literal><text> of the measurement.</text></paragraph></listItem></list></listItem></list></directive><paragraph><literal><text>400</text></literal><text>: There is an error in the request.</text></paragraph><code lang="json" copyable="True">{ "error": "string" }</code><directive name="list-table" header-rows="1" widths="35 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>error</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>A message describing the error.</text></paragraph></listItem></list></listItem></list></directive></section></directive></section><section><heading id="logging-apis"><text>Logging APIs</text></heading><directive 
domain="mongodb" name="operation" hash="get-/groups/{groupid}/apps/{appid}/logs" method="get" path="/groups/{groupId}/apps/{appId}/logs"><paragraph><text>Retrieve MongoDB Realm logs.</text></paragraph><section><heading id="id7"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem></list></directive></section><section><heading id="id8"><text>Query Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>co_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>Return only log messages associated with the given request ID.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>errors_only</text></literal></paragraph></listItem><listItem><paragraph><text>boolean</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>Whether to only return errors.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>user_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>Return only log messages associated with the given </text><literal><text>user_id</text></literal><text>.</text></paragraph></listItem></list></listItem><listItem><list 
enumtype="unordered"><listItem><paragraph><literal><text>start_date</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The date and time in ISO 8601 at which to begin returning results, exclusive.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>end_date</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The date and time in ISO 8601 at which to cease returning results, inclusive.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>skip</text></literal></paragraph></listItem><listItem><paragraph><text>integer</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The offset number of matching log entries to skip before including them in the response.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>limit</text></literal></paragraph></listItem><listItem><paragraph><text>integer</text></paragraph></listItem><listItem><paragraph><text>optional</text></paragraph></listItem><listItem><paragraph><text>The maximum number of log entries to include in the response. If the</text></paragraph></listItem></list><paragraph><text>query matches more than this many logs, it returns documents in ascending order by date until the limit is reached.</text></paragraph></listItem></list></directive></section><section><heading id="id9"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="id10"><text>Responses</text></heading><paragraph><literal><text>200</text></literal><text>: Successfully retrieved.</text></paragraph><code lang="json" copyable="True">{ "logs": [ { "_id": "string", "co_id": "string", "domain_id": "string", "app_id": "string", "group_id": "string", "request_url": "string", "request_method": "string", "started": "string", "completed": "string", "error": "string", "error_code": "string", "status": "integer" } ], "nextEndDate": "string", "nextSkip": "integer" }</code><directive name="list-table" header-rows="1" widths="35 15 50"><list 
enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs</text></literal></paragraph></listItem><listItem><paragraph><text>array</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[]._id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].co_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].domain_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].app_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].group_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].request_url</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].request_method</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].started</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].completed</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].error</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>logs.[].error_code</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list 
enumtype="unordered"><listItem><paragraph><literal><text>logs.[].status</text></literal></paragraph></listItem><listItem><paragraph><text>integer</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>nextEndDate</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>The end date and time of the next page of log entries in ISO 8601 format. MongoDB Realm paginates the result sets of queries that match more than 100 log entries and includes this field in paginated responses. To get the next page of up to 100 entries, pass this value as the </text><literal><text>end_date</text></literal><text> parameter in a subsequent request.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>nextSkip</text></literal></paragraph></listItem><listItem><paragraph><text>integer</text></paragraph></listItem><listItem><paragraph><text>The offset into the next page of log entries in ISO 8601 format. MongoDB Realm paginates the result sets of queries that match more than 100 log entries and includes this field in paginated responses where the first entry on the next page has the same timestamp as the last entry on this page. To get the next page of up to 100 entries, pass this value, if it is present, as the </text><literal><text>skip</text></literal><text> parameter in a subsequent request.</text></paragraph></listItem></list></listItem></list></directive></section></directive></section><section><heading id="services-apis"><text>Services APIs</text></heading><directive domain="mongodb" name="operation" hash="get-/groups/{groupid}/apps/{appid}/services/{serviceid}" method="get" path="/groups/{groupId}/apps/{appId}/services/{serviceId}"><paragraph><text>Retrieve a </text><ref_role domain="std" name="label" target="services"><text>service</text></ref_role><text>.</text></paragraph><section><heading id="description"><text>Description</text></heading><paragraph><text>Test description here.</text></paragraph></section><section><heading id="id11"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" 
target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>serviceId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>Service ID</text></paragraph></listItem></list></listItem></list></directive></section><section><heading id="id12"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="id13"><text>Responses</text></heading><paragraph><literal><text>200</text></literal><text>: The service was successfully deleted.</text></paragraph><code lang="json" copyable="True">{ "_id": "string", "name": "string", "type": "string", "version": "integer" }</code><directive name="list-table" header-rows="1" widths="35 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Field</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>_id</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>name</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>type</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>version</text></literal></paragraph></listItem><listItem><paragraph><text>integer</text></paragraph></listItem><listItem></listItem></list></listItem></list></directive></section></directive><directive domain="mongodb" name="operation" hash="delete-/groups/{groupid}/apps/{appid}/services/{serviceid}" method="delete" path="/groups/{groupId}/apps/{appId}/services/{serviceId}"><paragraph><text>Delete a </text><ref_role domain="std" name="label" 
target="services"><text>service</text></ref_role><text>.</text></paragraph><section><heading id="id14"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>serviceId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>Service ID</text></paragraph></listItem></list></listItem></list></directive></section><section><heading id="id15"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="id16"><text>Responses</text></heading><paragraph><literal><text>204</text></literal><text>: The service was successfully deleted.</text></paragraph></section></directive><directive domain="mongodb" name="operation" hash="patch-/groups/{groupid}/apps/{appid}/services/{serviceid}" method="patch" path="/groups/{groupId}/apps/{appId}/services/{serviceId}"><paragraph><text>Update 
a </text><ref_role domain="std" name="label" target="services"><text>service</text></ref_role><text>.</text></paragraph><section><heading id="id17"><text>Path Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>groupId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>An </text><substitution_reference name="atlas"></substitution_reference><text> </text><reference refuri="https://docs.atlas.mongodb.com/tutorial/manage-projects/"><text>Project/Group ID</text></reference><text>.</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>appId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The ObjectID of your application.</text></paragraph></listItem></list><paragraph><ref_role domain="std" name="label" target="realm-api-project-and-application-ids"></ref_role><text> demonstrates how to find this value.</text></paragraph></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>serviceId</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>Service ID</text></paragraph></listItem></list></listItem></list></directive></section><section><heading id="id18"><text>Header Parameters</text></heading><directive name="list-table" header-rows="1" widths="20 15 15 50"><list enumtype="unordered"><listItem><list enumtype="unordered"><listItem><paragraph><text>Name</text></paragraph></listItem><listItem><paragraph><text>Type</text></paragraph></listItem><listItem><paragraph><text>Necessity</text></paragraph></listItem><listItem><paragraph><text>Description</text></paragraph></listItem></list></listItem><listItem><list enumtype="unordered"><listItem><paragraph><literal><text>Authorization</text></literal></paragraph></listItem><listItem><paragraph><text>string</text></paragraph></listItem><listItem><paragraph><text>required</text></paragraph></listItem><listItem><paragraph><text>The authorization token provided in the </text><literal><text>access_token</text></literal><text> field of</text></paragraph></listItem></list><paragraph><text>the </text><ref_role domain="std" name="label" target="post-/auth/providers/{provider}/login"></ref_role><text> and </text><ref_role domain="std" name="label" target="post-/auth/session"></ref_role><text> API endpoints.</text></paragraph></listItem></list></directive></section><section><heading id="id19"><text>Responses</text></heading><paragraph><literal><text>200</text></literal><text>: Successfully updated.</text></paragraph></section></directive></section></directive></root> """, )
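The nextEndDate / nextSkip fields documented in the record above drive log pagination. A minimal sketch of a pagination loop, assuming the Realm Admin API base URL https://realm.mongodb.com/api/admin/v3.0 and a logs endpoint at /groups/{groupId}/apps/{appId}/logs; the group ID, app ID, and token below are hypothetical placeholders, and the requests package is required:

import requests

BASE_URL = "https://realm.mongodb.com/api/admin/v3.0"  # assumed base URL
GROUP_ID, APP_ID = "<groupId>", "<appId>"              # hypothetical placeholders
HEADERS = {"Authorization": "Bearer <access_token>"}   # token from the auth login endpoint

url = f"{BASE_URL}/groups/{GROUP_ID}/apps/{APP_ID}/logs"
params = {}
while True:
    resp = requests.get(url, headers=HEADERS, params=params)
    resp.raise_for_status()
    page = resp.json()
    for entry in page.get("logs", []):
        print(entry.get("status"))
    # Pages hold at most 100 entries; nextEndDate (and, when the page boundary
    # falls on a timestamp tie, nextSkip) point at the next page and are
    # absent on the last one.
    if "nextEndDate" not in page:
        break
    params = {"end_date": page["nextEndDate"]}
    if "nextSkip" in page:
        params["skip"] = page["nextSkip"]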
464.12381
6,070
0.745942
5,970
48,733
6.064489
0.055611
0.156884
0.150808
0.21787
0.918326
0.912222
0.905704
0.887336
0.873747
0.856097
0
0.006863
0.034248
48,733
104
6,071
468.586538
0.762409
0.000882
0
0
0
0
0.040689
0
0
0
0
0
0.047619
1
0.047619
false
0
0.238095
0
0.285714
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
df6b2bbc98f05fb4c97735c897fd6e0ce0d1cb46
148
py
Python
lib/config_utils/__init__.py
Wastoon/TSAL
0f880c600f1a2e128de9c9fdfb94ae0776948cbe
[ "Apache-2.0" ]
3
2021-01-26T07:36:08.000Z
2021-04-25T13:47:12.000Z
lib/config_utils/__init__.py
Wastoon/TSAL
0f880c600f1a2e128de9c9fdfb94ae0776948cbe
[ "Apache-2.0" ]
null
null
null
lib/config_utils/__init__.py
Wastoon/TSAL
0f880c600f1a2e128de9c9fdfb94ae0776948cbe
[ "Apache-2.0" ]
null
null
null
# from .configure_utils import load_configure
from .args import obtain_args as obtain_basic_args
#from .lk_args import obtain_args as obtain_lk_args
37
51
0.858108
25
148
4.72
0.4
0.169492
0.271186
0.338983
0.474576
0.474576
0
0
0
0
0
0
0.108108
148
4
51
37
0.893939
0.337838
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
df6bac698514834a247adcffca080220120191c6
2,272
py
Python
accounts/forms.py
Omwakwe/twofactordemo
7ed27f6f9e27dc9e82562007aa454e9f0e051022
[ "MIT" ]
2
2020-04-12T08:39:00.000Z
2020-04-22T15:57:14.000Z
accounts/forms.py
Omwakwe/twofactordemo
7ed27f6f9e27dc9e82562007aa454e9f0e051022
[ "MIT" ]
null
null
null
accounts/forms.py
Omwakwe/twofactordemo
7ed27f6f9e27dc9e82562007aa454e9f0e051022
[ "MIT" ]
null
null
null
from django import forms
from accounts.models import *


class SignupForm(forms.ModelForm):
    user_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control input-sm"}))
    password_confirm = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control input-sm"}))
    first_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    second_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    last_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    email = forms.EmailField(required=False, widget=forms.EmailInput(attrs={'class': "form-control input-sm"}))

    class Meta:
        model = User
        fields = ('user_name', 'first_name', 'second_name', 'last_name',
                  'email', 'password', 'password_confirm',)


class LoginForm(forms.Form):
    user_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control input-sm"}))

    class Meta:
        fields = ('user_name', 'password',)


class ProfileForm(forms.ModelForm):
    user_name = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control input-sm"}))
    new_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control input-sm"}))
    first_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    second_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    last_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': "form-control input-sm"}))
    email = forms.EmailField(widget=forms.EmailInput(attrs={'class': "form-control input-sm"}))

    class Meta:
        model = User
        fields = ('user_name', 'first_name', 'second_name', 'last_name',
                  'email', 'old_password', 'new_password',)
61.405405
115
0.705546
279
2,272
5.659498
0.132616
0.111463
0.141862
0.212793
0.878404
0.878404
0.878404
0.873338
0.867638
0.867638
0
0
0.120158
2,272
37
116
61.405405
0.789895
0
0
0.580645
0
0
0.25033
0
0
0
0
0
0
1
0
false
0.258065
0.064516
0
0.774194
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
10d2d3785ae8037376ac2c4b1e32d635e2ff33f2
57
py
Python
site_scons/site_init.py
bdbaddog/scons-processing-demo
99db34facd276aaf6d1691807969697640cf16ce
[ "MIT" ]
null
null
null
site_scons/site_init.py
bdbaddog/scons-processing-demo
99db34facd276aaf6d1691807969697640cf16ce
[ "MIT" ]
null
null
null
site_scons/site_init.py
bdbaddog/scons-processing-demo
99db34facd276aaf6d1691807969697640cf16ce
[ "MIT" ]
null
null
null
# Sample site_init.py
print("In site_tools/site_init.py")
28.5
35
0.789474
11
57
3.818182
0.636364
0.380952
0.47619
0
0
0
0
0
0
0
0
0
0.070175
57
2
35
28.5
0.792453
0.333333
0
0
0
0
0.702703
0.621622
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
1
0
8
80285dad4459405b4cf1035e4cab9bf7c9b0bae5
398
py
Python
rick/util/cast.py
oddbit-project/rick
6f6d9d68a7192a05393591a8ecddebaff5e385aa
[ "BSD-2-Clause" ]
null
null
null
rick/util/cast.py
oddbit-project/rick
6f6d9d68a7192a05393591a8ecddebaff5e385aa
[ "BSD-2-Clause" ]
null
null
null
rick/util/cast.py
oddbit-project/rick
6f6d9d68a7192a05393591a8ecddebaff5e385aa
[ "BSD-2-Clause" ]
null
null
null
def cast_str(value):
    try:
        return str(value) if value is not None else None
    except ValueError:
        return None


def cast_int(value):
    try:
        return int(value) if value is not None else None
    except ValueError:
        return None


def cast_float(value):
    try:
        return float(value) if value is not None else None
    except ValueError:
        return None
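A quick usage sketch of the helpers above, with illustrative values only:

# Each helper maps both a failed cast (ValueError) and a None input to None,
# so call sites can branch on None instead of catching exceptions.
assert cast_int("42") == 42
assert cast_int("forty-two") is None   # ValueError swallowed -> None
assert cast_float(None) is None
assert cast_str(3.5) == "3.5"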
19.9
58
0.635678
57
398
4.385965
0.263158
0.084
0.168
0.168
0.716
0.716
0.716
0.716
0.716
0.716
0
0
0.311558
398
19
59
20.947368
0.912409
0
0
0.6
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
7
33a6f9a8d6fb9b4ff69093397d879055d45768f5
20,376
py
Python
src/python_pachyderm/proto/admin/v1_8/pps/pps_pb2_grpc.py
barretthinson/python-pachyderm
82cea22d1105d70833a5522ccac750ca521694ff
[ "Apache-2.0" ]
null
null
null
src/python_pachyderm/proto/admin/v1_8/pps/pps_pb2_grpc.py
barretthinson/python-pachyderm
82cea22d1105d70833a5522ccac750ca521694ff
[ "Apache-2.0" ]
null
null
null
src/python_pachyderm/proto/admin/v1_8/pps/pps_pb2_grpc.py
barretthinson/python-pachyderm
82cea22d1105d70833a5522ccac750ca521694ff
[ "Apache-2.0" ]
null
null
null
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from python_pachyderm.proto.admin.v1_8.pps import pps_pb2 as client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class APIStub(object): # missing associated documentation comment in .proto file pass def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.CreateJob = channel.unary_unary( '/pps_1_8.API/CreateJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.CreateJobRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.Job.FromString, ) self.InspectJob = channel.unary_unary( '/pps_1_8.API/InspectJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectJobRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.ListJob = channel.unary_unary( '/pps_1_8.API/ListJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfos.FromString, ) self.ListJobStream = channel.unary_stream( '/pps_1_8.API/ListJobStream', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.FlushJob = channel.unary_stream( '/pps_1_8.API/FlushJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.FlushJobRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.DeleteJob = channel.unary_unary( '/pps_1_8.API/DeleteJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DeleteJobRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StopJob = channel.unary_unary( '/pps_1_8.API/StopJob', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StopJobRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectDatum = channel.unary_unary( '/pps_1_8.API/InspectDatum', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectDatumRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DatumInfo.FromString, ) self.ListDatum = channel.unary_unary( '/pps_1_8.API/ListDatum', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumResponse.FromString, ) self.ListDatumStream = channel.unary_stream( '/pps_1_8.API/ListDatumStream', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumStreamResponse.FromString, ) self.RestartDatum = channel.unary_unary( '/pps_1_8.API/RestartDatum', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.RestartDatumRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreatePipeline = channel.unary_unary( '/pps_1_8.API/CreatePipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.CreatePipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) 
self.InspectPipeline = channel.unary_unary( '/pps_1_8.API/InspectPipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectPipelineRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.PipelineInfo.FromString, ) self.ListPipeline = channel.unary_unary( '/pps_1_8.API/ListPipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListPipelineRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.PipelineInfos.FromString, ) self.DeletePipeline = channel.unary_unary( '/pps_1_8.API/DeletePipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DeletePipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StartPipeline = channel.unary_unary( '/pps_1_8.API/StartPipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StartPipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StopPipeline = channel.unary_unary( '/pps_1_8.API/StopPipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StopPipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.RerunPipeline = channel.unary_unary( '/pps_1_8.API/RerunPipeline', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.RerunPipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.DeleteAll = channel.unary_unary( '/pps_1_8.API/DeleteAll', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetLogs = channel.unary_stream( '/pps_1_8.API/GetLogs', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GetLogsRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.LogMessage.FromString, ) self.GarbageCollect = channel.unary_unary( '/pps_1_8.API/GarbageCollect', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GarbageCollectRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GarbageCollectResponse.FromString, ) self.ActivateAuth = channel.unary_unary( '/pps_1_8.API/ActivateAuth', request_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ActivateAuthRequest.SerializeToString, response_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ActivateAuthResponse.FromString, ) class APIServicer(object): # missing associated documentation comment in .proto file pass def CreateJob(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def InspectJob(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListJob(self, request, context): """ListJob returns information about current and past Pachyderm jobs. 
This is deprecated in favor of ListJobStream """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListJobStream(self, request, context): """ListJobStream returns information about current and past Pachyderm jobs. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def FlushJob(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteJob(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StopJob(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def InspectDatum(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListDatum(self, request, context): """ListDatum returns information about each datum fed to a Pachyderm job. This is deprecated in favor of ListDatumStream """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListDatumStream(self, request, context): """ListDatumStream returns information about each datum fed to a Pachyderm job """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RestartDatum(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreatePipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def InspectPipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListPipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StartPipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise 
NotImplementedError('Method not implemented!') def StopPipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RerunPipeline(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAll(self, request, context): """DeleteAll deletes everything """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetLogs(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GarbageCollect(self, request, context): """Garbage collection """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ActivateAuth(self, request, context): """An internal call that causes PPS to put itself into an auth-enabled state (all pipeline have tokens, correct permissions, etcd) """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_APIServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateJob': grpc.unary_unary_rpc_method_handler( servicer.CreateJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.CreateJobRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.Job.SerializeToString, ), 'InspectJob': grpc.unary_unary_rpc_method_handler( servicer.InspectJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectJobRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.SerializeToString, ), 'ListJob': grpc.unary_unary_rpc_method_handler( servicer.ListJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListJobRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfos.SerializeToString, ), 'ListJobStream': grpc.unary_stream_rpc_method_handler( servicer.ListJobStream, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListJobRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.SerializeToString, ), 'FlushJob': grpc.unary_stream_rpc_method_handler( servicer.FlushJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.FlushJobRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.JobInfo.SerializeToString, ), 'DeleteJob': grpc.unary_unary_rpc_method_handler( servicer.DeleteJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DeleteJobRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'StopJob': grpc.unary_unary_rpc_method_handler( servicer.StopJob, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StopJobRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'InspectDatum': grpc.unary_unary_rpc_method_handler( servicer.InspectDatum, 
request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectDatumRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DatumInfo.SerializeToString, ), 'ListDatum': grpc.unary_unary_rpc_method_handler( servicer.ListDatum, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumResponse.SerializeToString, ), 'ListDatumStream': grpc.unary_stream_rpc_method_handler( servicer.ListDatumStream, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListDatumStreamResponse.SerializeToString, ), 'RestartDatum': grpc.unary_unary_rpc_method_handler( servicer.RestartDatum, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.RestartDatumRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'CreatePipeline': grpc.unary_unary_rpc_method_handler( servicer.CreatePipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.CreatePipelineRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'InspectPipeline': grpc.unary_unary_rpc_method_handler( servicer.InspectPipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.InspectPipelineRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.PipelineInfo.SerializeToString, ), 'ListPipeline': grpc.unary_unary_rpc_method_handler( servicer.ListPipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ListPipelineRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.PipelineInfos.SerializeToString, ), 'DeletePipeline': grpc.unary_unary_rpc_method_handler( servicer.DeletePipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.DeletePipelineRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'StartPipeline': grpc.unary_unary_rpc_method_handler( servicer.StartPipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StartPipelineRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'StopPipeline': grpc.unary_unary_rpc_method_handler( servicer.StopPipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.StopPipelineRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'RerunPipeline': grpc.unary_unary_rpc_method_handler( servicer.RerunPipeline, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.RerunPipelineRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'DeleteAll': grpc.unary_unary_rpc_method_handler( servicer.DeleteAll, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'GetLogs': grpc.unary_stream_rpc_method_handler( servicer.GetLogs, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GetLogsRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.LogMessage.SerializeToString, ), 'GarbageCollect': grpc.unary_unary_rpc_method_handler( servicer.GarbageCollect, 
request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GarbageCollectRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.GarbageCollectResponse.SerializeToString, ), 'ActivateAuth': grpc.unary_unary_rpc_method_handler( servicer.ActivateAuth, request_deserializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ActivateAuthRequest.FromString, response_serializer=client_dot_admin_dot_v1__8_dot_pps_dot_pps__pb2.ActivateAuthResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'pps_1_8.API', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
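A minimal sketch of exercising the generated stub above over a channel. The server address is a hypothetical placeholder; the sketch assumes the grpcio package, the generated pps_pb2 module imported at the top of the file, and that APIStub is imported from this pps_pb2_grpc module:

import grpc
from python_pachyderm.proto.admin.v1_8.pps import pps_pb2

# Connect to a (hypothetical) pachd address and call two of the RPCs above.
channel = grpc.insecure_channel("localhost:30650")  # placeholder address
stub = APIStub(channel)

# Unary RPC: an empty ListPipelineRequest asks for all pipelines.
print(stub.ListPipeline(pps_pb2.ListPipelineRequest()))

# Server-streaming RPC: GetLogs yields LogMessage items as they arrive.
for message in stub.GetLogs(pps_pb2.GetLogsRequest()):
    print(message)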
49.941176
120
0.778465
2,434
20,376
5.997124
0.066146
0.056724
0.066178
0.080359
0.843255
0.843255
0.837912
0.742413
0.735562
0.729533
0
0.016116
0.153416
20,376
407
121
50.063882
0.830087
0.080585
0
0.406528
1
0
0.096641
0.025327
0
0
0
0
0
1
0.071217
false
0.050445
0.008902
0
0.086053
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
33c10d3f400f2d5c1e145dcbf225ff69adc7efc9
62,452
py
Python
tests/test_public.py
gsakkis/typecheck
bc49d8651eff5944179967af55156219a0228b03
[ "MIT" ]
null
null
null
tests/test_public.py
gsakkis/typecheck
bc49d8651eff5944179967af55156219a0228b03
[ "MIT" ]
null
null
null
tests/test_public.py
gsakkis/typecheck
bc49d8651eff5944179967af55156219a0228b03
[ "MIT" ]
null
null
null
import sys import types from support import TestCase import typecheck from typecheck import Set def check_type(typ, obj): typecheck.check_type(typ, None, obj) class Test_returns(TestCase): def test_success_1(self): from typecheck import returns @returns(int, int, int) def foo(): return 5, 6, 7 assert foo() == (5, 6, 7) assert foo.type_return == (int, int, int) def test_success_2(self): from typecheck import returns @returns([int]) def foo(): return [4, 5, 6] assert foo() == [4, 5, 6] assert foo.type_return == [int] def test_success_3(self): from typecheck import returns @returns([int], int, str) def foo(): return [4, 5, 6], 5, "foo" assert foo() == ([4, 5, 6], 5, "foo") assert foo.type_return == ([int], int, str) def test_success_4(self): from typecheck import returns @returns(int) def foo(): return 7 assert foo() == 7 assert foo.type_return == int def test_success_5(self): from typecheck import returns @returns((int,)) def foo(): return (7,) assert foo() == (7,) assert foo.type_return == (int,) def test_failure_1(self): from typecheck import returns, TypeCheckError, _TC_TypeError @returns(int, int, int) def foo(): return 5, 6 assert foo.type_return == (int, int, int) try: foo() except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == (int, int, int) assert e.internal.wrong == (int, int) self.assertEqual(str(e), "Return value: for (5, 6), expected (<type 'int'>, <type 'int'>, <type 'int'>), got (<type 'int'>, <type 'int'>)") else: raise AssertionError("Succeeded incorrectly") def test_failure_2(self): from typecheck import returns, TypeCheckError from typecheck import _TC_TypeError, _TC_IndexError @returns([int]) def foo(): return [4, 5, 6.0] assert foo.type_return == [int] try: foo() except TypeCheckError, e: assert isinstance(e.internal, _TC_IndexError) assert e.internal.index == 2 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.wrong == float assert e.internal.inner.right == int self.assertEqual(str(e), "Return value: for [4, 5, 6.0], at index 2, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_3(self): from typecheck import returns, TypeCheckError from typecheck import _TC_TypeError, _TC_IndexError @returns([int], int, str) def foo(): return [4, 5, 6], 5, ["foo"] assert foo.type_return == ([int], int, str) try: foo() except TypeCheckError, e: assert isinstance(e.internal, _TC_IndexError) assert e.internal.index == 2 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.wrong == [str] assert e.internal.inner.right == str self.assertEqual(str(e), "Return value: for ([4, 5, 6], 5, ['foo']), at index 2, expected <type 'str'>, got [<type 'str'>]") else: raise AssertionError("Succeeded incorrectly") def test_failure_4(self): from typecheck import returns, TypeCheckError, _TC_TypeError @returns((int,)) def foo(): return 7 assert foo.type_return == (int,) try: foo() except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == (int,) assert e.internal.wrong == int self.assertEqual(str(e), "Return value: for 7, expected (<type 'int'>,), got <type 'int'>") else: raise AssertionError("Succeeded incorrectly") def test_decorator_returns_function(self): from typecheck import returns @returns((int,)) def foo(): return 7 assert isinstance(foo, types.FunctionType) class Test_yields(TestCase): def test_pass(self): from typecheck import yields @yields(int) def foo(a): yield a yield a + 1 yield a + 2 gen = foo(5) assert 
gen.next() == 5 assert gen.next() == 6 assert gen.next() == 7 assert foo.type_yield == int assert gen.type_yield == foo.type_yield try: gen.next() except StopIteration: pass else: raise AssertionError("Failed to raise the proper exception") def test_fail(self): from typecheck import yields, TypeCheckError from typecheck import _TC_GeneratorError, _TC_TypeError @yields(int) def foo(a): yield a gen = foo(5.0) assert foo.type_yield == int assert gen.type_yield == foo.type_yield try: gen.next() except TypeCheckError, e: assert isinstance(e.internal, _TC_GeneratorError) assert e.internal.yield_no == 1 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.right == int assert e.internal.inner.wrong == float self.assertEqual(str(e), "At yield #1: for 5.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") try: gen.next() except StopIteration: pass else: raise AssertionError("Failed to raise the proper exception") def test_restartable(self): from typecheck import yields @yields(int) def foo(a): yield a yield a + 1 yield a + 2 assert foo.type_yield == int assert foo(5).type_yield == foo.type_yield for _ in (1, 2): gen = foo(5) assert gen.type_yield == foo.type_yield assert gen.next() == 5 assert gen.next() == 6 assert gen.next() == 7 try: gen.next() except StopIteration: pass else: raise AssertionError("Failed to raise the proper exception") def test_fails_on_non_generators(self): from typecheck import yields @yields(int) def foo(a): return a + 1 try: assert foo(5) == 6 except TypeError, e: self.assertEqual(str(e), "yields only works for generators") else: raise AssertionError("Succeeded incorrectly") def test_decorator_returns_function(self): from typecheck import yields @yields(int) def foo(): yield 7 assert isinstance(foo, types.FunctionType) class Test_accepts(TestCase): def test_success_single_positional(self): from typecheck import accepts @accepts(int) def foo(int_1): return int_1 assert foo(6) == 6 assert foo.type_args == {'int_1': int} def test_success_multiple_positional(self): from typecheck import accepts @accepts(int, int, int) def foo(int_1, int_2, int_3): return int_1, int_2, int_3 assert foo(1, 2, 3) == (1, 2, 3) assert foo.type_args == {'int_1': int, 'int_2': int, 'int_3': int} def test_success_multiple_positional_type_by_kw(self): from typecheck import accepts @accepts(int_2=int, int_1=int, int_3=int) def foo(int_1, int_2, int_3): return int_1, int_2, int_3 assert foo(1, 2, 3) == (1, 2, 3) assert foo.type_args == {'int_1': int, 'int_2': int, 'int_3': int} def test_success_multiple_keyword(self): from typecheck import accepts @accepts(kw_1=int, kw_2=int, kw_3=int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo() == (5, 6, 7) assert foo(9, 9, 9) == (9, 9, 9) assert foo(kw_1=33, kw_3=88) == (33, 6, 88) assert foo(kw_3=55, kw_2=2) == (5, 2, 55) assert foo.type_args == {'kw_1': int, 'kw_2': int, 'kw_3': int} def test_success_multiple_keyword_type_by_pos(self): from typecheck import accepts # Checking type-specification, not arg-passing @accepts(int, int, int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo() == (5, 6, 7) assert foo(9, 9, 9) == (9, 9, 9) assert foo(kw_1=33, kw_3=88) == (33, 6, 88) assert foo(kw_3=55, kw_2=2) == (5, 2, 55) assert foo.type_args == {'kw_1': int, 'kw_2': int, 'kw_3': int} def test_success_kwargs_form_1(self): from typecheck import accepts # Type can be passed as a single type... 
# (in this case, the values of the dict # will be checked against the given type) @accepts(kwargs=int) def foo(**kwargs): return kwargs assert foo() == {} assert foo(int_1=5, int_2=8) == {'int_1': 5, 'int_2': 8} assert foo.type_args == {'kwargs': {str: int}} def test_success_kwargs_form_2(self): from typecheck import accepts # ...or in normal dict form # (in this case, full checking is done) @accepts(kwargs={str: int}) def foo(**kwargs): return kwargs assert foo() == {} assert foo(int_1=5, int_2=8) == {'int_1': 5, 'int_2': 8} assert foo.type_args == {'kwargs': {str: int}} def test_success_vargs_form_1(self): from typecheck import accepts # Type can be passed as a single type... @accepts(vargs=int) def foo(*vargs): return vargs assert foo() == () assert foo(5, 8) == (5, 8) assert foo.type_args == {'vargs': [int]} def test_success_vargs_form_2(self): from typecheck import accepts # ...or as an actual tuple. Note that # this form is useful if you want to # specify patterned tuples @accepts(vargs=[int]) def foo(*vargs): return vargs assert foo() == () assert foo(5, 8) == (5, 8) assert foo.type_args == {'vargs': [int]} def test_success_pos_and_kw(self): from typecheck import accepts @accepts(int, int, foo=int) def foo(req_1, req_2, foo=7): return (req_1, req_2, foo) assert foo(5, 6, foo=88) == (5, 6, 88) assert foo(5, 6) == (5, 6, 7) assert foo(foo=5, req_2=44, req_1=99) == (99, 44, 5) assert foo.type_args == {'req_1': int, 'req_2': int, 'foo': int} def test_success_unpacked_tuples(self): from typecheck import accepts @accepts(int, (int, (int, int)), int) def foo(req_1, (req_2, (req_3, req_4)), req_5): return (req_1, req_2, req_3, req_4, req_5) assert foo(4, (5, (6, 7)), 8) == (4, 5, 6, 7, 8) assert foo.type_args == {'req_1': int, ('req_2', ('req_3', 'req_4')): (int, (int, int)), 'req_5': int} def test_failure_single_positional(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int) def foo(int_1): return 7 assert foo.type_args == {'int_1': int} try: foo(6.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument int_1: for 6.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_positional_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, int) def foo(int_1, int_2, int_3): return 7 assert foo.type_args == {'int_1': int, 'int_2': int, 'int_3': int} try: foo(1, 2, 3.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument int_3: for 3.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_positional_2(self): from typecheck import accepts @accepts(int, int) def foo(a, b): return a, b try: foo(3) except TypeError, e: assert str(e) == "foo() takes exactly 2 arguments (1 given)" else: raise AssertionError("Failed to raise TypeError") def test_failure_multiple_positional_type_by_kw(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int_2=int, int_1=int, int_3=int) def foo(int_1, int_2, int_3): return 7 assert foo.type_args == {'int_1': int, 'int_2': int, 'int_3': int} try: foo(1, 2, 3.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == 
int assert e.internal.wrong == float self.assertEqual(str(e), "Argument int_3: for 3.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_keyword_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(kw_1=int, kw_2=int, kw_3=int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1': int, 'kw_2': int, 'kw_3': int} try: foo(9.0, 9, 9) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_1: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_keyword_2(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(kw_1=int, kw_2=int, kw_3=int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1': int, 'kw_2': int, 'kw_3': int} try: foo(kw_1=9.0, kw_3=88) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_1: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Failed to raise TypeCheckError") def test_failure_multiple_keyword_3(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(kw_1=int, kw_2=int, kw_3=int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1': int, 'kw_2': int, 'kw_3': int} try: foo(kw_3=9.0, kw_1=88) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_3: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Failed to raise TypeCheckError") def test_failure_multiple_keyword_type_by_pos_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1':int, 'kw_2': int, 'kw_3': int} try: foo(9.0, 9, 9) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_1: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_keyword_type_by_pos_2(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1':int, 'kw_2': int, 'kw_3': int} try: foo(kw_1=9.0, kw_3=88) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_1: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_multiple_keyword_type_by_pos_3(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, kw_3=int) def foo(kw_1=5, kw_2=6, kw_3=7): return (kw_1, kw_2, kw_3) assert foo.type_args == {'kw_1':int, 'kw_2': int, 'kw_3': int} try: foo(kw_3=9.0, kw_1=88) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert 
e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument kw_3: for 9.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_kwargs_form_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError, _TC_KeyValError # Type can be passed as a single type... # (in this case, the values of the dict # will be checked against the given type) @accepts(kwargs=int) def foo(**kwargs): return kwargs assert foo.type_args == {'kwargs': {str: int}} try: foo(int_1=5.0, int_2=8) except TypeCheckError, e: assert isinstance(e.internal, _TC_KeyValError) assert e.internal.key == 'int_1' assert e.internal.val == 5.0 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.right == int assert e.internal.inner.wrong == float self.assertEqual(str(e), "Argument kwargs: for {'int_1': 5.0, 'int_2': 8}, at key 'int_1', value 5.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_kwargs_form_2(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError, _TC_KeyValError # ...or in normal dict form # (in this case, full checking is done) @accepts(kwargs={str: int}) def foo(**kwargs): return kwargs assert foo.type_args == {'kwargs': {str: int}} try: foo(int_1=5.0, int_2=8) except TypeCheckError, e: assert isinstance(e.internal, _TC_KeyValError) assert e.internal.key == 'int_1' assert e.internal.val == 5.0 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.right == int assert e.internal.inner.wrong == float self.assertEqual(str(e), "Argument kwargs: for {'int_1': 5.0, 'int_2': 8}, at key 'int_1', value 5.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_vargs_form_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError, _TC_IndexError # Type can be passed as a single type... @accepts(vargs=int) def foo(*vargs): return vargs assert foo.type_args == {'vargs': [int]} try: foo(5, 8.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_IndexError) assert e.internal.index == 1 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.right == int assert e.internal.inner.wrong == float self.assertEqual(str(e), "Argument vargs: for [5, 8.0], at index 1, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_vargs_form_2(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError, _TC_IndexError # ...or as an actual list. 
Note that # this form is useful if you want to # specify patterned lists @accepts(vargs=[int]) def foo(*vargs): return vargs assert foo.type_args == {'vargs': [int]} try: foo(5, 8.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_IndexError) assert e.internal.index == 1 assert isinstance(e.internal.inner, _TC_TypeError) assert e.internal.inner.right == int assert e.internal.inner.wrong == float self.assertEqual(str(e), "Argument vargs: for [5, 8.0], at index 1, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_pos_and_kw_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, foo=int) def foo(req_1, req_2, foo=7): return (req_1, req_2, foo) assert foo.type_args == {'foo': int, 'req_1':int, 'req_2': int} try: foo(5, 6.0, foo=88) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument req_2: for 6.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_pos_and_kw_2(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, foo=int) def foo(req_1, req_2, foo=7): return (req_1, req_2, foo) assert foo.type_args == {'foo': int, 'req_1':int, 'req_2': int} try: foo(5, 6, foo=88.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument foo: for 88.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_pos_and_kw_3(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, int, foo=int) def foo(req_1, req_2, foo=7): return (req_1, req_2, foo) assert foo.type_args == {'foo': int, 'req_1':int, 'req_2': int} try: foo(foo=5, req_2=44, req_1=99.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument req_1: for 99.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_unpacked_tuples_1(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_IndexError, _TC_TypeError @accepts(int, (int, (int, int)), int) def foo(a, (b, (c, d)), e): return a, b, c, d, e assert foo.type_args == {'a': int, ('b', ('c', 'd')): (int, (int, int)), 'e': int} try: foo(5, (6, (7, 8.0)), 9) except TypeCheckError, e: assert isinstance(e.internal, _TC_IndexError) assert e.internal.index == 1 assert isinstance(e.internal.inner, _TC_IndexError) assert e.internal.inner.index == 1 assert isinstance(e.internal.inner.inner, _TC_TypeError) assert e.internal.inner.inner.right == int assert e.internal.inner.inner.wrong == float self.assertEqual(str(e), "Argument (b, (c, d)): for (6, (7, 8.0)), at index 1, at index 1, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_failure_unpacked_tuples_2(self): from typecheck import accepts @accepts(int, (int, (int, int)), int) def foo(a, (b, (c, d)), e): return a, b, c, d, e assert foo.type_args == {'a': int, ('b', ('c', 'd')): (int, (int, int)), 'e': int} try: foo(5, (6, 4), 9) except TypeError, e: if sys.version_info[:2] >= (2, 5): self.assertEqual(str(e), "'int' object is not iterable") 
else: self.assertEqual(str(e), "unpack non-sequence") else: raise AssertionError("Succeeded incorrectly") def test_failure_unpacked_tuples_3(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int, (int, (int, int)), int) def foo(a, (b, (c, d)), e): return a, b, c, d, e assert foo.type_args == {'a': int, ('b', ('c', 'd')): (int, (int, int)), 'e': int} try: foo(5, (6, (7, 8)), (9, 10)) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == (int, int) self.assertEqual(str(e), "Argument e: for (9, 10), expected <type 'int'>, got (<type 'int'>, <type 'int'>)") else: raise AssertionError("Succeeded incorrectly") def test_generators_pass(self): from typecheck import accepts @accepts(int) def foo(a): yield a yield a + 1 yield a + 2 gen = foo(5) assert gen.next() == 5 assert gen.next() == 6 assert gen.next() == 7 assert foo.type_args == {'a': int} def test_generators_fail(self): from typecheck import accepts, TypeCheckError from typecheck import _TC_TypeError @accepts(int) def foo(a): yield a yield a + 1 yield a + 2 assert foo.type_args == {'a': int} try: foo(5.0) except TypeCheckError, e: assert isinstance(e.internal, _TC_TypeError) assert e.internal.right == int assert e.internal.wrong == float self.assertEqual(str(e), "Argument a: for 5.0, expected <type 'int'>, got <type 'float'>") else: raise AssertionError("Succeeded incorrectly") def test_bad_signature_missing_pos(self): from typecheck import accepts, TypeSignatureError from typecheck import _TS_MissingTypeError try: @accepts(int, int, int) def foo(a, b, c, e): return a, b, c, e except TypeSignatureError, e: assert isinstance(e.internal, _TS_MissingTypeError) assert e.internal.parameter == 'e' self.assertEqual(str(e), "parameter 'e' lacks a type") else: raise AssertionError("Succeeded incorrectly") def test_bad_signature_missing_kw(self): from typecheck import accepts, TypeSignatureError from typecheck import _TS_MissingTypeError try: @accepts(a=int, b=int, c=int) def foo(a=5, b=6, c=7, d=8): return a, b, c, d except TypeSignatureError, e: assert isinstance(e.internal, _TS_MissingTypeError) assert e.internal.parameter == 'd' self.assertEqual(str(e), "parameter 'd' lacks a type") else: raise AssertionError("Succeeded incorrectly") def test_bad_signature_duplicate_kw(self): from typecheck import accepts, TypeSignatureError from typecheck import _TS_TwiceTypedError try: @accepts(int, a=int, b=int, c=int) def foo(a=5, b=6, c=7): return a, b, c except TypeSignatureError, e: assert isinstance(e.internal, _TS_TwiceTypedError) assert e.internal.parameter == 'a' assert e.internal.kw_type == int assert e.internal.pos_type == int self.assertEqual(str(e), "parameter 'a' is provided two types (<type 'int'> and <type 'int'>)") else: raise AssertionError("Succeeded incorrectly") def test_bad_signature_extra_pos(self): from typecheck import accepts, TypeSignatureError from typecheck import _TS_ExtraPositionalError try: @accepts(int, int, int, int) def foo(a, b, c): return a, b, c except TypeSignatureError, e: assert isinstance(e.internal, _TS_ExtraPositionalError) assert e.internal.type == int self.assertEqual(str(e), "an extra positional type has been supplied") else: raise AssertionError("Succeeded incorrectly") def test_bad_signature_extra_kw(self): from typecheck import accepts, TypeSignatureError from typecheck import _TS_ExtraKeywordError try: @accepts(a=int, b=int, c=int) def foo(a=5, b=6): return a, b except TypeSignatureError, e: 
            assert isinstance(e.internal, _TS_ExtraKeywordError)
            assert e.internal.keyword == 'c'
            self.assertEqual(str(e), "the keyword 'c' in the signature is not in the function")
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_bad_signature_unpack_nonsequence(self):
        from typecheck import accepts, TypeSignatureError
        from typecheck import _TS_TupleError

        try:
            @accepts(int, int)
            def foo(a, (b, c)):
                return a, b, c
        except TypeSignatureError, e:
            assert isinstance(e.internal, _TS_TupleError)
            assert e.internal.parameters == ('b', 'c')
            assert e.internal.types == int
            self.assertEqual(str(e), "the signature type <type 'int'> does not match ('b', 'c')")
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_bad_signature_unpack_bad_sequence_1(self):
        from typecheck import accepts, TypeSignatureError
        from typecheck import _TS_TupleError

        try:
            @accepts(int, (int, int, int))
            def foo(a, (b, c)):
                return a, b, c
        except TypeSignatureError, e:
            assert isinstance(e.internal, _TS_TupleError)
            assert e.internal.parameters == ('b', 'c')
            assert e.internal.types == (int, int, int)
            self.assertEqual(str(e), "the signature type (<type 'int'>, <type 'int'>, <type 'int'>) does not match ('b', 'c')")
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_bad_signature_unpack_bad_sequence_2(self):
        from typecheck import accepts, TypeSignatureError
        from typecheck import _TS_TupleError

        try:
            @accepts(int, (int, int))
            def foo(a, (b, c, d)):
                return a, b, c, d
        except TypeSignatureError, e:
            assert isinstance(e.internal, _TS_TupleError)
            assert e.internal.parameters == ('b', 'c', 'd')
            assert e.internal.types == (int, int)
            self.assertEqual(str(e), "the signature type (<type 'int'>, <type 'int'>) does not match ('b', 'c', 'd')")
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_bad_signature_unpack_bad_sequence_3(self):
        from typecheck import accepts, TypeSignatureError
        from typecheck import _TS_TupleError

        try:
            @accepts(int, (int, (int, int), (int, int)))
            def foo(a, (b, (c, d, e), (f, g))):
                return a, b, c, d, e, f, g
        except TypeSignatureError, e:
            assert isinstance(e.internal, _TS_TupleError)
            assert e.internal.parameters == ('b', ('c', 'd', 'e'), ('f', 'g'))
            assert e.internal.types == (int, (int, int), (int, int))
            self.assertEqual(str(e), "the signature type (<type 'int'>, (<type 'int'>, <type 'int'>), (<type 'int'>, <type 'int'>)) does not match ('b', ('c', 'd', 'e'), ('f', 'g'))")
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_decorator_returns_function(self):
        from typecheck import accepts

        @accepts(int)
        def foo(a):
            return 7

        assert isinstance(foo, types.FunctionType)

    def test_method_retains_name_docstring(self):
        from typecheck import accepts

        @accepts(int)
        def f(a):
            'f docstring'

        self.assertEquals(f.__name__, 'f')
        self.assertEquals(f.__doc__, 'f docstring')

    def test_oldstyle_classes_accepted(self):
        from typecheck import accepts, TypeCheckError

        class T: pass
        class T2(T): pass

        @accepts(T)
        def f(t_instance):
            pass

        f(T())
        f(T2())

        self.assertRaises(TypeCheckError, f, T)
        self.assertRaises(TypeCheckError, f, T2)

class SetTests(TestCase):
    def test_success_basic(self):
        check_type(Set([int]), set([4, 5, 6, 4, 5, 6]))

    def test_success_multitypes(self):
        check_type(Set([int, float]), set([4, 5.0, 6, 4, 5, 6.0]))

    def test_success_nested(self):
        from typecheck import Or

        check_type(Set([(int, int)]), set([(4, 5), (6, 7)]))
        check_type(Set([Or(int, float)]), set([4, 4.0, 5, 5.0]))

    def test_failure(self):
        from typecheck import _TC_KeyError, _TC_TypeError

        try:
            check_type(Set([int]), set([4, 5, 6.0]))
        except _TC_KeyError, e:
            assert e.key == 6.0
            assert isinstance(e.inner, _TC_TypeError)
            assert e.inner.right == int
            assert e.inner.wrong == float
        else:
            raise AssertionError("Did not raise the proper exception")

    def test_failure_multitypes(self):
        from typecheck import Or, _TC_KeyError, _TC_TypeError

        try:
            check_type(Set([int, float]), set([4, 5, 6.0, 's']))
        except _TC_KeyError, e:
            assert e.key == 's'
            assert isinstance(e.inner, _TC_TypeError)
            assert e.inner.right == Or(int, float)
            assert e.inner.wrong == str
        else:
            raise AssertionError("Did not raise the proper exception")

    def test_failure_nested(self):
        from typecheck import _TC_KeyError, _TC_TypeError

        try:
            check_type(Set([(int, int)]), set([(4, 5), (4, 6.0)]))
        except _TC_KeyError, e:
            assert e.key == (4, 6.0)
            assert isinstance(e.inner, _TC_TypeError)
            assert e.inner.right == (int, int)
            assert e.inner.wrong == (int, float)
        else:
            raise AssertionError("Did not raise the proper exception")

    def test_equality(self):
        class A(object): pass
        class B(A): pass

        eq_tests = [
            (Set([str]), Set([str])),
            (Set([A, B]), Set([A, B])),
            (Set([int, int, str]), Set([int, str])),
            (Set([int, str]), Set([str, int])),
            (Set([Set([int, float]), int]), Set([Set([float, int]), int])),
            (Set([Set([int, str]), Set([int, str])]), Set([Set([int, str])]))]

        ne_tests = [
            (Set([A, B]), Set([B, B])),
            (Set([A, B]), Set([A, A])),
            (Set([Set([int, str])]), Set([Set([Set([int, str])])])),
            (Set([int, int]), set([int, int]))]

        self.multipleAssertEqual(eq_tests, ne_tests)

    def test_hash(self):
        class A(object): pass
        class B(A): pass

        eq_tests = [
            (Set([str]), Set([str])),
            (Set([A, B]), Set([A, B])),
            (Set([int, int, str]), Set([int, str])),
            (Set([int, str]), Set([str, int])),
            (Set([Set([int, float]), int]), Set([Set([float, int]), int])),
            (Set([Set([int, str]), Set([int, str])]), Set([Set([int, str])]))]

        ne_tests = [
            (Set([A, B]), Set([B, B])),
            (Set([A, B]), Set([A, A])),
            (Set([Set([int, str])]), Set([Set([Set([int, str])])]))]

        self.multipleAssertEqualHashes(eq_tests, ne_tests)

    def test_Type_uses_it(self):
        from typecheck import Type

        t = Type(set([int, float]))
        assert isinstance(t, Set)

class Test_cooperation(TestCase):
    def test_args_pass_return_pass(self):
        from typecheck import accepts, returns

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                return a, b

            assert foo(5, 6) == (5, 6)
            assert foo.type_return == (int, int)
            assert foo.type_args == {'a': int, 'b': int}

        run_test(accepts, returns)
        run_test(returns, accepts)

    def test_args_pass_return_fail(self):
        from typecheck import accepts, returns
        from typecheck import TypeCheckError, _TC_IndexError, _TC_TypeError

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                return a, float(b)

            assert foo.type_return == (int, int)
            assert foo.type_args == {'a': int, 'b': int}

            try:
                foo(5, 6)
            except TypeCheckError, e:
                assert isinstance(e.internal, _TC_IndexError)
                assert e.internal.index == 1
                assert isinstance(e.internal.inner, _TC_TypeError)
                assert e.internal.inner.right == int
                assert e.internal.inner.wrong == float
                self.assertEqual(str(e), "Return value: for (5, 6.0), at index 1, expected <type 'int'>, got <type 'float'>")
            else:
                raise AssertionError("Succeeded incorrectly")

        run_test(accepts, returns)
        run_test(returns, accepts)

    def test_args_return_builds_function(self):
        from typecheck import accepts, returns

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                return a, float(b)

            assert isinstance(foo, types.FunctionType)

        run_test(accepts, returns)
        run_test(returns, accepts)

    def test_args_pass_yield_pass(self):
        from typecheck import accepts, yields

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                yield a, b
                yield a + 1, b
                yield a, b + 1

            assert foo.type_yield == (int, int)
            assert foo.type_args == {'a': int, 'b': int}

            gen = foo(5, 6)
            assert gen.next() == (5, 6)
            assert gen.next() == (6, 6)
            assert gen.next() == (5, 7)
            assert gen.type_yield == foo.type_yield

            try:
                gen.next()
            except StopIteration:
                pass
            else:
                raise AssertionError("Failed to raise the proper exception")

        run_test(accepts, yields)
        run_test(yields, accepts)

    def test_args_pass_yield_fail(self):
        from typecheck import accepts, yields
        from typecheck import TypeCheckError, _TC_IndexError, _TC_TypeError
        from typecheck import _TC_GeneratorError

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                yield a, b
                yield a, float(b)

            assert foo.type_yield == (int, int)
            assert foo.type_args == {'a': int, 'b': int}

            gen = foo(5, 6)
            assert gen.type_yield == foo.type_yield
            assert gen.next() == (5, 6)

            try:
                gen.next()
            except TypeCheckError, e:
                assert isinstance(e.internal, _TC_GeneratorError)
                assert e.internal.yield_no == 2
                assert isinstance(e.internal.inner, _TC_IndexError)
                assert e.internal.inner.index == 1
                assert isinstance(e.internal.inner.inner, _TC_TypeError)
                assert e.internal.inner.inner.right == int
                assert e.internal.inner.inner.wrong == float
                self.assertEqual(str(e), "At yield #2: for (5, 6.0), at index 1, expected <type 'int'>, got <type 'float'>")
            else:
                raise AssertionError("Succeeded incorrectly")

            try:
                gen.next()
            except StopIteration:
                pass
            else:
                raise AssertionError("Failed to raise the proper exception")

        run_test(accepts, yields)
        run_test(yields, accepts)

    def test_args_yield_builds_function(self):
        from typecheck import accepts, yields

        def run_test(dec_1, dec_2):
            @dec_1(int, int)
            @dec_2(int, int)
            def foo(a, b):
                yield a, float(b)

            assert isinstance(foo, types.FunctionType)

        run_test(accepts, yields)
        run_test(yields, accepts)

    def __test_doubler(self, decorator):
        try:
            @decorator(int, int)
            @decorator(int, int)
            def foo(a, b):
                return a, b
        except RuntimeError, e:
            self.assertEqual(str(e), 'Cannot use the same typecheck_* function more than once on the same function')
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_double_accepts(self):
        from typecheck import accepts
        self.__test_doubler(accepts)

    def test_double_returns(self):
        from typecheck import returns
        self.__test_doubler(returns)

    def test_double_yields(self):
        from typecheck import yields
        self.__test_doubler(yields)

    def test_return_and_yield(self):
        from typecheck import yields, returns

        try:
            @yields(int, int)
            @returns(int, int)
            def foo(a, b):
                return a, b
        except RuntimeError, e:
            self.assertEqual(str(e), 'Cannot use returns and yields on the same function')
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_no_double_execution_return(self):
        from typecheck import accepts, returns

        def double_execution(dec_1, dec_2):
            # We need to make sure that the function only gets executed once,
            # even if it's wrapped by two decorators
            usage_counter = [0]

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                usage_counter[0] += 1
                return a

            assert foo(5) == 5
            assert usage_counter[0] == 1

        double_execution(accepts, returns)
        double_execution(returns, accepts)

    def test_no_double_execution_yield(self):
        from typecheck import accepts, yields

        def double_execution(dec_1, dec_2):
            # We need to make sure that the function only gets executed once,
            # even if it's wrapped by two decorators
            usage_counter = [0]

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                usage_counter[0] += 1
                yield a

            gen = foo(5)
            assert gen.next() == 5
            assert usage_counter[0] == 1

            try:
                gen.next()
            except StopIteration:
                pass
            else:
                raise AssertionError("Failed to raise the proper exception")

        double_execution(accepts, yields)
        double_execution(yields, accepts)

    def test_verify_args_checked_first__return(self):
        from typecheck import accepts, returns
        from typecheck import TypeCheckError, _TC_TypeError

        def run_test(dec_1, dec_2):
            @dec_1
            @dec_2
            def foo(a, b):
                return 'a'

            try:
                assert foo(5.0, 6.0) == 5.0
            except TypeCheckError, e:
                assert isinstance(e.internal, _TC_TypeError)
                assert e.internal.right == int
                assert e.internal.wrong == float
                self.assertEqual(str(e), "Argument a: for 5.0, expected <type 'int'>, got <type 'float'>")
            else:
                raise AssertionError("Failed to raise the proper exception")

        run_test(accepts(int, int), returns(float))
        run_test(returns(float), accepts(int, int))

    def test_verify_args_checked_first__yield(self):
        from typecheck import accepts, yields
        from typecheck import TypeCheckError, _TC_TypeError

        def run_test(dec_1, dec_2):
            @dec_1
            @dec_2
            def foo(a, b):
                yield 'a'

            try:
                assert foo(5.0, 6.0).next() == 5.0
            except TypeCheckError, e:
                assert isinstance(e.internal, _TC_TypeError)
                assert e.internal.right == int
                assert e.internal.wrong == float
                self.assertEqual(str(e), "Argument a: for 5.0, expected <type 'int'>, got <type 'float'>")
            else:
                raise AssertionError("Failed to raise the proper exception")

        run_test(accepts(int, int), yields(float))
        run_test(yields(float), accepts(int, int))

class Test_Self_class(TestCase):
    def test_self_in_args_pass(self):
        from typecheck import accepts, Self

        class Test(object):
            @accepts(Self(), int, Self())
            def foo(self, a, b):
                return a, b

        t = Test()
        assert Test().foo(4, t) == (4, t)
        assert Test.foo.type_args == {'self': Self(), 'a': int, 'b': Self()}

    def test_self_in_args_fail(self):
        from typecheck import accepts, Self, TypeCheckError
        from typecheck import _TC_TypeError

        class Test(object):
            @accepts(Self(), int, Self())
            def foo(self, a, b):
                return a, b

        assert Test.foo.type_args == {'self': Self(), 'a': int, 'b': Self()}

        try:
            assert Test().foo(4, 6) == (4, 6)
        except TypeCheckError, e:
            assert isinstance(e.internal, _TC_TypeError)
            assert e.internal.wrong == int
            assert e.internal.right == Test
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_self_in_return_pass(self):
        from typecheck import returns, Self

        class Test(object):
            @returns(Self(), int, Self())
            def foo(self, a, b):
                return self, a, b

        t = Test()
        t_1 = Test()
        assert t_1.foo(4, t) == (t_1, 4, t)
        assert Test.foo.type_return == (Self(), int, Self())

    def test_self_in_return_fail(self):
        from typecheck import returns, Self, TypeCheckError
        from typecheck import _TC_IndexError, _TC_TypeError

        class Test(object):
            @returns(Self(), int, Self())
            def foo(self, a, b):
                return self, a, b

        try:
            t = Test()
            assert t.foo(4, 6) == (t, 4, 6)
        except TypeCheckError, e:
            assert isinstance(e.internal, _TC_IndexError)
            assert e.internal.index == 2
            assert isinstance(e.internal.inner, _TC_TypeError)
            assert e.internal.inner.wrong == int
            assert e.internal.inner.right == Test
        else:
            raise AssertionError("Succeeded incorrectly")

        assert Test.foo.type_return == (Self(), int, Self())

    def test_self_in_yield_pass(self):
        from typecheck import yields, Self

        class Test(object):
            @yields(Self(), int, Self())
            def foo(self, a, b):
                yield self, a, self

        t = Test()
        assert t.foo(4, 6).next() == (t, 4, t)
        assert Test.foo.type_yield == (Self(), int, Self())

    def test_self_in_args_yield_pass(self):
        from typecheck import yields, Self
        from typecheck import accepts

        class Test(object):
            @accepts(Self(), int, int)
            @yields(Self(), int, Self())
            def foo(self, a, b):
                yield self, a, self

        t = Test()
        assert t.foo(4, 6).next() == (t, 4, t)
        assert Test.foo.type_yield == (Self(), int, Self())
        assert Test.foo.type_args == {'self': Self(), 'a': int, 'b': int}

    def test_self_in_args_yield_fail(self):
        from typecheck import yields, Self, TypeCheckError
        from typecheck import _TC_IndexError, _TC_TypeError
        from typecheck import _TC_GeneratorError, accepts

        class Test(object):
            @accepts(Self(), int, int)
            @yields(Self(), int, Self())
            def foo(self, a, b):
                yield b, a, b

        assert Test.foo.type_yield == (Self(), int, Self())
        assert Test.foo.type_args == {'self': Self(), 'a': int, 'b': int}

        try:
            assert Test().foo(4, 6).next() == (6, 4, 6)
        except TypeCheckError, e:
            assert isinstance(e.internal, _TC_GeneratorError)
            assert e.internal.yield_no == 1
            assert isinstance(e.internal.inner, _TC_IndexError)
            assert e.internal.inner.index == 0
            assert isinstance(e.internal.inner.inner, _TC_TypeError)
            assert e.internal.inner.inner.wrong == int
            assert e.internal.inner.inner.right == Test
        else:
            raise AssertionError("Succeeded incorrectly")

class Test_enable_checking_global(TestCase):
    def tearDown(self):
        typecheck.enable_checking = True

    def test_accepts(self):
        from typecheck import accepts, TypeCheckError

        @accepts(int)
        def foo(a):
            return a

        typecheck.enable_checking = True
        assert foo(5) == 5

        typecheck.enable_checking = False
        assert foo(5.0) == 5.0

        typecheck.enable_checking = True
        try:
            assert foo(5.0) == 5.0
        except TypeCheckError:
            pass  # We don't need to look at this
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_returns(self):
        from typecheck import returns, TypeCheckError

        @returns(int)
        def foo(a):
            return a

        typecheck.enable_checking = True
        assert foo(5) == 5

        typecheck.enable_checking = False
        assert foo(5.0) == 5.0

        typecheck.enable_checking = True
        try:
            assert foo(5.0) == 5.0
        except TypeCheckError:
            pass  # We don't need to look at this
        else:
            raise AssertionError("Succeeded incorrectly")

    def test_yields(self):
        from typecheck import yields, TypeCheckError

        @yields(int)
        def foo(a):
            yield a

        typecheck.enable_checking = True
        gen = foo(5)
        assert gen.next() == 5
        try:
            gen.next()
        except StopIteration:
            pass
        else:
            raise AssertionError("Failed to raise the proper exception")

        typecheck.enable_checking = False
        gen = foo(5.0)
        assert gen.next() == 5.0
        try:
            gen.next()
        except StopIteration:
            pass
        else:
            raise AssertionError("Failed to raise the proper exception")

        typecheck.enable_checking = True
        gen = foo(5.0)
        try:
            assert gen.next() == 5.0
        except TypeCheckError:
            pass  # We don't need to look at this
        else:
            raise AssertionError("Succeeded incorrectly")
        try:
            gen.next()
        except StopIteration:
            pass
        else:
            raise AssertionError("Failed to raise the proper exception")

class Test_signature_checking_hooks(TestCase):
    def setUp(self):
        from typecheck import register_type

        flags = {'start': [], 'stop': []}
        appender_reg = {}

        def appender(flag, function):
            if isinstance(function, types.GeneratorType):
                if function.gi_frame is not None:
                    appender_reg[function] = function.gi_frame.f_code.co_name
                flags[flag].append("gen_%s" % appender_reg[function])
            else:
                flags[flag].append(function.__name__)

        class Test(object):
            @classmethod
            def __typesig__(cls, obj):
                pass

            # This is invoked when we start checking the function
            @classmethod
            def __startchecking__(cls, function):
                appender('start', function)

            @classmethod
            def __switchchecking__(cls, from_func, to_func):
                appender('stop', from_func)
                appender('start', to_func)

            # This is invoked when we stop checking the function
            @classmethod
            def __stopchecking__(cls, function):
                appender('stop', function)

        register_type(Test)
        self.Test = Test
        self.flags = flags

    def tearDown(self):
        from typecheck import is_registered_type, unregister_type

        if is_registered_type(self.Test):
            unregister_type(self.Test)

    def __test_single(self, decorator):
        @decorator(int)
        def foo(a):
            return a

        assert foo(5) == 5
        assert self.flags['start'] == ['foo']
        assert self.flags['stop'] == ['foo']

    def test_args(self):
        from typecheck import accepts
        self.__test_single(accepts)

    def test_return(self):
        from typecheck import returns
        self.__test_single(returns)

    def test_yield(self):
        from typecheck import yields

        @yields(int)
        def foo(a):
            yield a

        gen = foo(5)
        assert self.flags['start'] == ['foo', 'gen_foo']
        assert self.flags['stop'] == ['foo']

        assert gen.next() == 5
        assert self.flags['start'] == ['foo', 'gen_foo']
        assert self.flags['stop'] == ['foo']

        try:
            gen.next()
        except StopIteration:
            assert self.flags['start'] == ['foo', 'gen_foo']
            assert self.flags['stop'] == ['foo', 'gen_foo']
        else:
            raise AssertionError("Failed to raise StopIteration at the right point")

    def test_args_return(self):
        from typecheck import returns, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                return a

            assert foo(5) == 5
            assert self.flags['start'] == ['foo']
            assert self.flags['stop'] == ['foo']

        test_double(returns, accepts)
        test_double(accepts, returns)

    def test_args_yield(self):
        from typecheck import yields, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                yield a

            gen = foo(5)
            assert self.flags['start'] == ['foo', 'gen_foo']
            assert self.flags['stop'] == ['foo']

            assert gen.next() == 5
            assert self.flags['start'] == ['foo', 'gen_foo']
            assert self.flags['stop'] == ['foo']

            try:
                gen.next()
            except StopIteration:
                assert self.flags['start'] == ['foo', 'gen_foo']
                assert self.flags['stop'] == ['foo', 'gen_foo']
            else:
                raise AssertionError("Failed to raise StopIteration at the right point")

        test_double(yields, accepts)
        test_double(accepts, yields)

    def test_handles_exceptions_return(self):
        from typecheck import returns, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                raise RuntimeError()

            try:
                assert foo(5) == 5
            except RuntimeError:
                pass
            else:
                raise AssertionError("Failed to allow RuntimeError through")

            assert self.flags['start'] == ['foo']
            assert self.flags['stop'] == ['foo']

        test_double(returns, accepts)
        test_double(accepts, returns)

    def test_handles_exceptions_yield(self):
        from typecheck import yields, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                yield a
                raise RuntimeError()

            gen = foo(5)
            assert self.flags['start'] == ['foo', 'gen_foo']
            assert self.flags['stop'] == ['foo']

            assert gen.next() == 5
            assert self.flags['start'] == ['foo', 'gen_foo']
            assert self.flags['stop'] == ['foo']

            try:
                gen.next()
            except RuntimeError:
                # Note that gen_foo hasn't stopped checking yet
                assert self.flags['start'] == ['foo', 'gen_foo']
                assert self.flags['stop'] == ['foo']
            else:
                raise AssertionError("Failed to raise RuntimeError at the right point")

            try:
                gen.next()
            except StopIteration:
                assert self.flags['start'] == ['foo', 'gen_foo']
                assert self.flags['stop'] == ['foo', 'gen_foo']
            else:
                raise AssertionError("Failed to raise StopIteration at the right point")

        test_double(yields, accepts)
        test_double(accepts, yields)

    def test_nested_functions(self):
        from typecheck import returns, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def bar(a):
                return a

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                return bar(a)

            assert foo(5) == 5
            assert self.flags['start'] == ['foo', 'bar']
            assert self.flags['stop'] == ['bar', 'foo']

        test_double(returns, accepts)
        test_double(accepts, returns)

    def test_nested_functions_with_exceptions(self):
        from typecheck import returns, accepts

        def test_double(dec_1, dec_2):
            self.flags['start'] = []
            self.flags['stop'] = []

            @dec_1(int)
            @dec_2(int)
            def bar(a):
                raise RuntimeError()

            @dec_1(int)
            @dec_2(int)
            def foo(a):
                return bar(a)

            assert foo(5) == 5

        for dec_1, dec_2 in ((returns, accepts), (accepts, returns)):
            try:
                test_double(dec_1, dec_2)
            except RuntimeError:
                assert self.flags['start'] == ['foo', 'bar']
                assert self.flags['stop'] == ['bar', 'foo']
            else:
                raise AssertionError("Failed to raise the proper exception")

    def test_incorrect_generator_usage(self):
        from typecheck import yields

        @yields(int)
        def foo():
            yield 5

        def contain_scope():
            assert foo().next() == 5

        for i in range(1, 3):
            contain_scope()
            assert self.flags['start'] == ['foo', 'gen_foo'] * i
            assert self.flags['stop'] == ['foo', 'gen_foo'] * i

# We inherit everything else from Test_signature_checking_hooks
class Test_hooks_ignore_enable_checking(Test_signature_checking_hooks):
    def setUp(self):
        Test_signature_checking_hooks.setUp(self)
        typecheck.enable_checking = False

    def tearDown(self):
        Test_signature_checking_hooks.tearDown(self)
        typecheck.enable_checking = True

class TestDocStrings(TestCase):
    def test_typechecked_docstrings(self):
        import doctest
        import doctests

        finder = doctest.DocTestFinder(verbose=True)
        tests = finder.find(doctests)
        self.assertEquals(3, len(tests))

        runner = doctest.DocTestRunner(doctest.OutputChecker())
        for test in tests:
            runner.run(test)

        self.assertEquals(7, runner.summarize()[1])
        self.assertEquals(0, runner.summarize()[0])
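For orientation, here is a minimal usage sketch of the decorator API the tests above exercise. It is not part of the test suite: it assumes the same typecheck package is importable, uses Python 2 syntax to match the tests, and add_pair is a hypothetical example function.

    # Illustrative sketch only, assuming the typecheck package from the
    # tests above is on the path (Python 2).
    import typecheck
    from typecheck import accepts, returns, TypeCheckError

    @accepts(int, int)   # argument types are verified before the body runs
    @returns(int, int)   # the return value must be a 2-tuple of ints
    def add_pair(a, b):
        return a + b, a - b

    assert add_pair(5, 6) == (11, -1)

    try:
        add_pair(5, 6.0)  # float where an int is required
    except TypeCheckError:
        pass              # arguments are checked first, so the body never runs
    else:
        raise AssertionError("should have raised TypeCheckError")

    # Checking can also be disabled globally, as the
    # Test_enable_checking_global tests verify.
    typecheck.enable_checking = False
    assert add_pair(5, 6.0) == (11.0, -1.0)
    typecheck.enable_checking = True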
32.341792
183
0.555226
7,658
62,452
4.376208
0.04035
0.040014
0.081073
0.070003
0.878405
0.854445
0.813595
0.788321
0.758989
0.738848
0
0.021561
0.330862
62,452
1,930
184
32.358549
0.780397
0.018911
0
0.758597
0
0.010115
0.091035
0
0
0
0
0
0.308833
0
null
null
0.020904
0.100472
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
33db929b74c069df64b7e1d3563b1d67223ea772
68,600
py
Python
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_h264ref/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_h264ref/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_h264ref/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 7.84093e-05, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20275, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.000513597, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.664923, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.15141, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.660364, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.47669, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.657171, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 
6.19078, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 9.70296e-05, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.024104, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.174328, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.178264, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.174425, 'Execution Unit/Register Files/Runtime Dynamic': 0.202368, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.42127, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.3277, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 4.61489, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00258275, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00258275, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00224289, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch 
Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000864606, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00256077, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00996915, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0250018, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.171369, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.47111, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.582047, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96874, 'Instruction Fetch Unit/Runtime Dynamic': 1.2595, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0314366, 'L2/Runtime 
Dynamic': 0.0103903, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 7.61558, 'Load Store Unit/Data Cache/Runtime Dynamic': 3.07729, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.206358, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.206358, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 8.59401, 'Load Store Unit/Runtime Dynamic': 4.30134, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.508844, 'Load Store Unit/StoreQ/Runtime Dynamic': 1.01769, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.180591, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.181021, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0773544, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.8707, 'Memory Management Unit/Runtime Dynamic': 0.258375, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 29.2174, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000339187, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.0340045, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End RAT/Area': 0.114751, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.359798, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897, 'Renaming 
Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 0.394142, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 10.8386, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 4.62898e-05, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202725, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.000306134, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.357248, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.576228, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.290861, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.22434, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.408543, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.62308, 'Execution 
Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 5.78354e-05, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0149846, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.108373, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.11082, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.108431, 'Execution Unit/Register Files/Runtime Dynamic': 0.125805, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.228323, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.717233, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.67548, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00172773, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00172773, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00152608, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch 
Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000602386, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159194, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00657348, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0158065, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106534, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.291403, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.361839, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96396, 'Instruction Fetch Unit/Runtime Dynamic': 0.782156, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0191765, 'L2/Runtime Dynamic': 
0.00635795, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 5.16026, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.89266, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.126923, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.126923, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 5.75962, 'Load Store Unit/Runtime Dynamic': 2.64553, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.312971, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.625941, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.111074, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.111337, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0478454, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.74691, 'Memory Management Unit/Runtime Dynamic': 0.159182, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 23.7022, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000151961, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0161199, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.188617, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int 
Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.204888, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 6.47359, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 4.62898e-05, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202725, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.000306134, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.357853, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.577204, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.291353, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.22641, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.409234, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.62443, 'Execution 
Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 5.78354e-05, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.01501, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.108556, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.111008, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.108614, 'Execution Unit/Register Files/Runtime Dynamic': 0.126018, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.228709, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.718352, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.67888, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00173198, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00173198, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0015298, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch 
Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000603829, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159464, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00658839, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.015847, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106715, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.292076, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.362451, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96396, 'Instruction Fetch Unit/Runtime Dynamic': 0.783677, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0192223, 'L2/Runtime Dynamic': 
0.00636091, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 5.16592, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.89538, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.127106, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.127106, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 5.76614, 'Load Store Unit/Runtime Dynamic': 2.64933, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.313422, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.626844, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.111234, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.111498, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0479561, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.747185, 'Memory Management Unit/Runtime Dynamic': 0.159454, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 23.7104, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000152242, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0161472, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.188932, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int 
Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.205232, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 6.48294, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 4.62898e-05, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202725, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.000306134, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.35749, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.576618, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.291058, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.22517, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.40882, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.62362, 'Execution Unit/Register 
Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 5.78354e-05, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0149947, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.108446, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.110895, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.108504, 'Execution Unit/Register Files/Runtime Dynamic': 0.12589, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.228478, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.71769, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.67685, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00172926, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00172926, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00152742, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local 
Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000602905, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159302, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00657896, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0158212, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106607, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.291658, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.362084, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96396, 'Instruction Fetch Unit/Runtime Dynamic': 0.782748, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0191918, 'L2/Runtime Dynamic': 0.00635756, 
'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 5.16259, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.89378, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.126998, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.126998, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 5.76231, 'Load Store Unit/Runtime Dynamic': 2.64709, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.313156, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.626313, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.11114, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.111403, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0478873, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.747023, 'Memory Management Unit/Runtime Dynamic': 0.159291, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 23.7056, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000152046, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0161308, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.188743, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End 
RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.205026, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 6.47736, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}], 'DRAM': {'Area': 0, 'Gate Leakage': 0, 'Peak Dynamic': 1.5186381047674296, 'Runtime Dynamic': 1.5186381047674296, 'Subthreshold Leakage': 4.252, 'Subthreshold Leakage with power gating': 4.252}, 'L3': [{'Area': 61.9075, 'Gate Leakage': 0.0484137, 'Peak Dynamic': 0.114824, 'Runtime Dynamic': 0.0707952, 'Subthreshold Leakage': 6.80085, 'Subthreshold Leakage with power gating': 3.32364}], 'Processor': {'Area': 191.908, 'Gate Leakage': 1.53485, 'Peak Dynamic': 100.45, 'Peak Power': 133.563, 'Runtime Dynamic': 30.3433, 'Subthreshold Leakage': 31.5774, 'Subthreshold Leakage with power gating': 13.9484, 'Total Cores/Area': 128.669, 'Total Cores/Gate Leakage': 1.4798, 'Total Cores/Peak Dynamic': 100.336, 'Total Cores/Runtime Dynamic': 30.2725, 'Total Cores/Subthreshold Leakage': 24.7074, 'Total Cores/Subthreshold Leakage with power gating': 10.2429, 'Total L3s/Area': 61.9075, 'Total L3s/Gate Leakage': 0.0484137, 'Total L3s/Peak Dynamic': 0.114824, 'Total L3s/Runtime Dynamic': 0.0707952, 'Total L3s/Subthreshold Leakage': 6.80085, 'Total L3s/Subthreshold Leakage with power gating': 3.32364, 'Total Leakage': 33.1122, 'Total NoCs/Area': 1.33155, 'Total NoCs/Gate Leakage': 0.00662954, 'Total NoCs/Peak Dynamic': 0.0, 'Total NoCs/Runtime Dynamic': 0.0, 'Total NoCs/Subthreshold Leakage': 0.0691322, 'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
75.054705
124
0.681924
8,090
68,600
5.776514
0.065389
0.123598
0.112985
0.093469
0.943401
0.934798
0.923693
0.898849
0.877728
0.858555
0
0.1315
0.224359
68,600
914
125
75.054705
0.746772
0
0
0.667396
0
0
0.657498
0.048104
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
33e2f030cce2d6d17ca36dd1fbc70db246927ef6
187
py
Python
spiketag/fpga/__init__.py
aliddell/spiketag
f5600126c2c6c9be319e8b808d51ea33be843909
[ "BSD-3-Clause" ]
null
null
null
spiketag/fpga/__init__.py
aliddell/spiketag
f5600126c2c6c9be319e8b808d51ea33be843909
[ "BSD-3-Clause" ]
null
null
null
spiketag/fpga/__init__.py
aliddell/spiketag
f5600126c2c6c9be319e8b808d51ea33be843909
[ "BSD-3-Clause" ]
null
null
null
from .bram_thres import threshold
from .bram_thres import offset
from .bram_thres import channel_hash
from .memory_api import *
from .configFPGA import xike_config
from .run import run
23.375
37
0.823529
29
187
5.103448
0.482759
0.162162
0.263514
0.385135
0
0
0
0
0
0
0
0
0.139037
187
7
38
26.714286
0.919255
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
1d270f2c1a96ef4619ef33bf9ea4f0fcb36d55a8
144
py
Python
earthworm/__init__.py
shubhamt619/earthworm
6459cc96731b54ff7844fc60a68331410f4b999b
[ "MIT" ]
1
2019-09-09T06:58:45.000Z
2019-09-09T06:58:45.000Z
earthworm/__init__.py
shubhamt619/earthworm
6459cc96731b54ff7844fc60a68331410f4b999b
[ "MIT" ]
null
null
null
earthworm/__init__.py
shubhamt619/earthworm
6459cc96731b54ff7844fc60a68331410f4b999b
[ "MIT" ]
null
null
null
from .hello import *  # importing from a file (It will be used as a module)
from .hi import *  # importing from a file (It will be used as a module)
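The two wildcard imports above pull every public name from hello.py and hi.py into the package namespace. As a hedged aside (not part of the original file), standard Python lets a module opt in to an explicit export list via __all__, which keeps `import *` from leaking helper names; a minimal sketch with a hypothetical hello.py:

# hypothetical hello.py: __all__ restricts what `from .hello import *` exports
__all__ = ['greet']          # only `greet` is re-exported by a wildcard import

def greet():                 # public: listed in __all__
    return 'hello'

def _helper():               # private by convention; never exported by *
    return 'internal'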
72
73
0.729167
28
144
3.75
0.464286
0.285714
0.361905
0.380952
0.857143
0.857143
0.857143
0.857143
0.857143
0.857143
0
0
0.201389
144
2
74
72
0.913043
0.708333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
14
1d4315b7f083e36284e174e8921d7f77139927c7
23,640
py
Python
membership/tests/tests_invitation_api.py
810Teams/clubs-and-events-backend
cf429f43251ad7e77c0d9bc9fe91bb030ca8bae8
[ "MIT" ]
1
2021-06-25T17:16:13.000Z
2021-06-25T17:16:13.000Z
membership/tests/tests_invitation_api.py
810Teams/clubs-and-events-backend
cf429f43251ad7e77c0d9bc9fe91bb030ca8bae8
[ "MIT" ]
null
null
null
membership/tests/tests_invitation_api.py
810Teams/clubs-and-events-backend
cf429f43251ad7e77c0d9bc9fe91bb030ca8bae8
[ "MIT" ]
null
null
null
'''
Membership Application's Invitation API Test
membership/tests/tests_invitation_api.py

@author Teerapat Kraisrisirikul (810Teams)
'''

from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APITestCase

from community.models import Club, Event, Lab, CommunityEvent
from membership.models import Membership, Invitation

import datetime


class InvitationAPITest(APITestCase):
    ''' Invitation API test '''
    def setUp(self):
        self.user_01 = get_user_model().objects.create_user(username='user_01', password='12345678', name='User One')
        self.user_02 = get_user_model().objects.create_user(username='user_02', password='12345678', name='User Two')
        self.user_03 = get_user_model().objects.create_user(username='user_03', password='12345678', name='User Three')
        self.user_04 = get_user_model().objects.create_user(username='user_04', password='12345678', name='User Four')
        self.user_05 = get_user_model().objects.create_user(username='user_05', password='12345678', name='User Five')
        self.user_06 = get_user_model().objects.create_user(username='user_06', password='12345678', name='User Six')
        self.user_07 = get_user_model().objects.create_user(username='user_07', password='12345678', name='User Seven')
        self.lecturer = get_user_model().objects.create_user(
            username='lecturer', password='12345678', name='Prof.Lazy Bones', user_group='lecturer'
        )
        self.support_staff = get_user_model().objects.create_user(
            username='support', password='12345678', name='Mr.Supporter', user_group='support'
        )

        self.club = Club.objects.create(name_th='ชุมนุมทดสอบคำเชิญ', name_en='Invitation Testing Club')
        self.event = Event.objects.create(
            name_th='กิจกรรมทดสอบคำเชิญ', name_en='Invitation Testing Event', is_approved=True,
            location='L207 IT KMITL', start_date=datetime.date(2020, 12, 1), end_date=datetime.date(2020, 12, 2),
            start_time=datetime.time(9, 0, 0), end_time=datetime.time(17, 0, 0)
        )
        self.lab = Lab.objects.create(name_th='ห้องปฏิบัติการทดสอบคำเชิญ', name_en='Invitation Testing Lab')
        self.community_event_allows_outside = CommunityEvent.objects.create(
            name_th='กิจกรรมชุมนุมทดสอบคำเชิญ', name_en='Invitation Testing Club Event', is_approved=True,
            location='L207 IT KMITL', start_date=datetime.date(2020, 12, 1), end_date=datetime.date(2020, 12, 2),
            start_time=datetime.time(9, 0, 0), end_time=datetime.time(17, 0, 0),
            created_under_id=self.club.id, allows_outside_participators=True
        )
        self.community_event_disallows_outside = CommunityEvent.objects.create(
            name_th='กิจกรรมห้องปฏิบัติการทดสอบคำเชิญ', name_en='Invitation Testing Lab Event', is_approved=True,
            location='L207 IT KMITL', start_date=datetime.date(2020, 12, 1), end_date=datetime.date(2020, 12, 2),
            start_time=datetime.time(9, 0, 0), end_time=datetime.time(17, 0, 0),
            created_under_id=self.club.id, allows_outside_participators=False
        )

        Membership.objects.create(community_id=self.club.id, user_id=self.user_01.id, position=3)
        Membership.objects.create(community_id=self.club.id, user_id=self.user_02.id, position=2)
        Membership.objects.create(community_id=self.club.id, user_id=self.user_03.id, position=1)
        Membership.objects.create(community_id=self.club.id, user_id=self.user_04.id, position=0)
        Membership.objects.create(community_id=self.event.id, user_id=self.user_01.id, position=3)
        Membership.objects.create(community_id=self.lab.id, user_id=self.user_01.id, position=3)
        Membership.objects.create(
            community_id=self.community_event_allows_outside.id, user_id=self.user_01.id, position=3
        )
        Membership.objects.create(
            community_id=self.community_event_disallows_outside.id, user_id=self.user_01.id, position=3
        )

    def test_retrieve_invitation_as_member(self):
        ''' Test retrieve invitation as different member positions '''
        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_05.id, community_id=self.club.id
        )

        # Access invitation as leader position
        self.client.login(username='user_01', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

        # Access invitation as deputy leader position
        self.client.login(username='user_02', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

        # Access invitation as staff position
        self.client.login(username='user_03', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

        # Access invitation as member position
        self.client.login(username='user_04', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

    def test_retrieve_invitation_as_receiver(self):
        ''' Test retrieve invitation as receiver and non-receiver '''
        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_05.id, community_id=self.club.id
        )

        # Access invitation as receiver
        self.client.login(username='user_05', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

        # Access invitation as someone else being both non-member and not invitation sender
        self.client.login(username='user_06', password='12345678')
        response = self.client.get('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.client.logout()

    def test_send_invitation_to_non_member(self):
        ''' Test send invitation to user which is not a member '''
        self.client.login(username='user_01', password='12345678')

        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_05.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.client.logout()

    def test_send_invitation_to_left_member(self):
        ''' Test send invitation to user which has already left the community '''
        self.client.login(username='user_01', password='12345678')

        Membership.objects.create(user_id=self.user_06.id, community_id=self.club.id, status='L')
        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_06.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.client.logout()

    def test_send_invitation_to_removed_member(self):
        ''' Test send invitation to user which has been removed from the community '''
        self.client.login(username='user_01', password='12345678')

        Membership.objects.create(user_id=self.user_07.id, community_id=self.club.id, status='X')
        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_07.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.client.logout()

    def test_send_invitation_to_active_member(self):
        ''' Test send invitation to user which is already a member '''
        self.client.login(username='user_01', password='12345678')

        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_04.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_send_invitation_to_retired_member(self):
        ''' Test send invitation to user which has already retired '''
        self.client.login(username='user_01', password='12345678')

        Membership.objects.create(user_id=self.user_05.id, community_id=self.club.id, status='R')
        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_05.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_send_invitation_to_non_base_member(self):
        ''' Test send invitation to non-members of base community '''
        self.client.login(username='user_01', password='12345678')

        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_05.id,
            'community': self.community_event_allows_outside.id
        })
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_05.id,
            'community': self.community_event_disallows_outside.id
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_send_invitation_duplicate(self):
        ''' Test send duplicate invitation '''
        self.client.login(username='user_01', password='12345678')

        Invitation.objects.create(invitor_id=self.user_01.id, invitee_id=self.user_05.id, community_id=self.club.id)
        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_05.id,
            'community': self.club.id
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_send_invitation_to_student(self):
        ''' Test send invitation to student '''
        self._test_send_invitation_to(
            'user_05', club=True, event=True, lab=True,
            community_event_allows_outside=True, community_event_disallows_outside=False
        )

    def test_send_invitation_to_lecturer(self):
        ''' Test send invitation to lecturer '''
        self._test_send_invitation_to(
            'lecturer', club=False, event=True, lab=True,
            community_event_allows_outside=True, community_event_disallows_outside=False
        )

    def test_send_invitation_to_support(self):
        ''' Test send invitation to support staff '''
        self._test_send_invitation_to(
            'support', club=False, event=True, lab=False,
            community_event_allows_outside=True, community_event_disallows_outside=False
        )

    def _test_send_invitation_to(self, username, club=True, event=True, lab=True,
                                 community_event_allows_outside=True, community_event_disallows_outside=False):
        ''' Test send invitation to different user groups '''
        self.client.login(username='user_01', password='12345678')
        invitee = get_user_model().objects.get(username=username)

        # Invitation from club
        self.client.post('/api/membership/invitation/', {
            'invitee': invitee.id,
            'community': self.club.id
        })
        self.assertEqual(len(Invitation.objects.filter(invitee_id=invitee.id, community_id=self.club.id)), int(club))

        # Invitation from event
        self.client.post('/api/membership/invitation/', {
            'invitee': invitee.id,
            'community': self.event.id
        })
        self.assertEqual(len(Invitation.objects.filter(invitee_id=invitee.id, community_id=self.event.id)), int(event))

        # Invitation from lab
        self.client.post('/api/membership/invitation/', {
            'invitee': invitee.id,
            'community': self.lab.id
        })
        self.assertEqual(len(Invitation.objects.filter(invitee_id=invitee.id, community_id=self.lab.id)), int(lab))

        # Invitation from community event that allows outside participators
        self.client.post('/api/membership/invitation/', {
            'invitee': invitee.id,
            'community': self.community_event_allows_outside.id
        })
        self.assertEqual(
            len(Invitation.objects.filter(invitee_id=invitee.id, community_id=self.community_event_allows_outside.id)),
            int(community_event_allows_outside)
        )

        # Invitation from community event that does not allow outside participators
        self.client.post('/api/membership/invitation/', {
            'invitee': invitee.id,
            'community': self.community_event_disallows_outside.id
        })
        self.assertEqual(
            len(Invitation.objects.filter(
                invitee_id=invitee.id, community_id=self.community_event_disallows_outside.id)
            ),
            int(community_event_disallows_outside)
        )

        self.client.logout()

    def test_send_invitation_as_leader(self):
        ''' Test send invitation as leader '''
        self._test_send_invitation_as('user_01', allows_sending=True)

    def test_send_invitation_as_deputy_leader(self):
        ''' Test send invitation as leader '''
        self._test_send_invitation_as('user_02', allows_sending=True)

    def test_send_invitation_as_staff(self):
        ''' Test send invitation as leader '''
        self._test_send_invitation_as('user_03', allows_sending=True)

    def test_send_invitation_as_member(self):
        ''' Test send invitation as leader '''
        self._test_send_invitation_as('user_04', allows_sending=False)

    def test_send_invitation_as_non_member(self):
        ''' Test send invitation as leader '''
        self._test_send_invitation_as('user_05', allows_sending=False)

    def _test_send_invitation_as(self, username, allows_sending=True):
        ''' Test send invitation as different membership positions '''
        self.client.login(username=username, password='12345678')

        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_06.id,
            'community': self.club.id
        })

        if allows_sending:
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.assertEqual(len(Invitation.objects.filter(invitee_id=self.user_06.id, community_id=self.club.id)), 1)
        else:
            self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
            self.assertEqual(len(Invitation.objects.filter(invitee_id=self.user_06.id, community_id=self.club.id)), 0)

        self.client.logout()

    def test_accept_invitation_own(self):
        ''' Test accept own invitation '''
        self.client.login(username='user_05', password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_05.id, community_id=self.club.id
        )
        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'A'
        })
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Invitation.objects.get(id=invitation.id).status, 'A')
        self.assertEqual(len(Membership.objects.filter(user_id=self.user_05.id, community_id=self.club.id)), 1)

        self.client.logout()

    def test_decline_invitation_own(self):
        ''' Test decline own invitation '''
        self.client.login(username='user_05', password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_05.id, community_id=self.club.id
        )
        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'D'
        })
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(Membership.objects.filter(user_id=self.user_05.id, community_id=self.club.id)), 0)

        self.client.logout()

    def test_accept_invitation_other(self):
        ''' Test accept own invitation '''
        self.client.login(username='user_05', password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_06.id, community_id=self.club.id
        )
        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'A'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(len(Membership.objects.filter(user_id=self.user_05.id, community_id=self.club.id)), 0)

        self.client.logout()

    def test_decline_invitation_other(self):
        ''' Test decline own invitation '''
        self.client.login(username='user_05', password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=self.user_01.id, invitee_id=self.user_06.id, community_id=self.club.id
        )
        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'D'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(len(Membership.objects.filter(user_id=self.user_05.id, community_id=self.club.id)), 0)

        self.client.logout()

    def test_respond_invitation_already_accepted(self):
        ''' Test respond to invitation that is already accepted '''
        self.client.login(username='user_05', password='12345678')
        invitation = Invitation.objects.create(invitee_id=self.user_05.id, community_id=self.club.id, status='A')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'W'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'A')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'A'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'A')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'D'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'A')

        self.client.logout()

    def test_respond_invitation_already_declined(self):
        ''' Test respond to invitation that is already accepted '''
        self.client.login(username='user_05', password='12345678')
        invitation = Invitation.objects.create(invitee_id=self.user_05.id, community_id=self.club.id, status='D')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'W'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'D')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'A'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'D')

        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'D'
        })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Invitation.objects.get(pk=invitation.id).status, 'D')

        self.client.logout()

    def test_respond_invitation_renew_membership(self):
        ''' Test respond to invitation that renews membership '''
        self.client.login(username='user_05', password='12345678')

        membership = Membership.objects.create(user_id=self.user_05.id, community_id=self.club.id, status='L')
        invitation = Invitation.objects.create(invitee_id=self.user_05.id, community_id=self.club.id)
        response = self.client.patch('/api/membership/invitation/{}/'.format(invitation.id), {
            'status': 'A'
        })
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Membership.objects.get(pk=membership.id).status, 'A')
        self.assertEqual(len(Membership.objects.filter(user_id=self.user_05.id, community_id=self.club.id)), 1)

        self.client.logout()

    def test_cancel_own_invitation(self):
        ''' Test cancel own invitation '''
        self.client.login(username='user_03', password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=self.user_03.id, invitee_id=self.user_06.id, community_id=self.club.id
        )
        response = self.client.delete('/api/membership/invitation/{}/'.format(invitation.id))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(len(Invitation.objects.filter(invitee_id=self.user_06.id, community_id=self.club.id)), 0)

        self.client.logout()

    def test_cancel_invitation_as_leader(self):
        ''' Test cancel invitation as leader '''
        self._test_cancel_invitation_as('user_01', allows_cancel=True)

    def test_cancel_invitation_as_deputy_leader(self):
        ''' Test cancel invitation as deputy leader '''
        self._test_cancel_invitation_as('user_02', allows_cancel=True)

    def test_cancel_invitation_as_staff(self):
        ''' Test cancel invitation as staff '''
        self._test_cancel_invitation_as('user_03', allows_cancel=False)

    def test_cancel_invitation_as_member(self):
        ''' Test cancel invitation as member '''
        self._test_cancel_invitation_as('user_04', allows_cancel=False)

    def test_cancel_invitation_as_non_member(self):
        ''' Test cancel invitation as leader '''
        self._test_cancel_invitation_as('user_05', allows_cancel=False)

    def _test_cancel_invitation_as(self, username, allows_cancel=True):
        ''' Test cancel invitation as different membership positions '''
        self.client.login(username=username, password='12345678')

        invitation = Invitation.objects.create(
            invitor_id=None, invitee_id=self.user_06.id, community_id=self.club.id
        )
        response = self.client.delete('/api/membership/invitation/{}/'.format(invitation.id))

        if allows_cancel:
            self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
            self.assertEqual(len(Invitation.objects.filter(invitee_id=self.user_06.id, community_id=self.club.id)), 0)
        else:
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
            self.assertEqual(len(Invitation.objects.filter(invitee_id=self.user_06.id, community_id=self.club.id)), 1)

        self.client.logout()
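Every test above follows the same client choreography: log in, issue the request against /api/membership/invitation/, assert on the HTTP status, log out. As a hedged distillation (not in the original file; it reuses only APIs already shown above and assumes the same setUp() fixtures such as user_06 and self.club), that pattern reduces to a single helper:

from rest_framework import status
from rest_framework.test import APITestCase


class InvitationPatternSketch(APITestCase):
    ''' Hypothetical condensed form of the login -> request -> assert
    pattern used throughout the test class above. '''

    def _send_as(self, username, expected_status):
        # Authenticate the DRF test client as the acting user.
        self.client.login(username=username, password='12345678')
        response = self.client.post('/api/membership/invitation/', {
            'invitee': self.user_06.id,
            'community': self.club.id
        })
        # Assert on the status constant, e.g. status.HTTP_201_CREATED.
        self.assertEqual(response.status_code, expected_status)
        self.client.logout()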
46.171875
119
0.672843
2,944
23,640
5.202106
0.067595
0.040353
0.027424
0.025857
0.903363
0.863859
0.845119
0.827359
0.752726
0.741365
0
0.034211
0.204949
23,640
511
120
46.262231
0.779303
0.085575
0
0.622283
0
0
0.103914
0.048122
0
0
0
0
0.144022
1
0.092391
false
0.089674
0.016304
0
0.111413
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
1d533c04dd33f70df222a8b4a78c4ecdda08d7bb
749
py
Python
scripts/5d_gen_xlsx_report.py
co-demos/ODAMAP-temp
2ca11d8990930f7435abbf208c1552fa7e77bad5
[ "MIT" ]
null
null
null
scripts/5d_gen_xlsx_report.py
co-demos/ODAMAP-temp
2ca11d8990930f7435abbf208c1552fa7e77bad5
[ "MIT" ]
1
2021-05-11T14:43:12.000Z
2021-05-11T14:43:12.000Z
scripts/5d_gen_xlsx_report.py
co-demos/ODAMAP-temp
2ca11d8990930f7435abbf208c1552fa7e77bad5
[ "MIT" ]
null
null
null
import pandas as pd

read_file = pd.read_csv("../published-data/reports-echeances-regional-naf-latest.csv", dtype={'dispositif': str, 'nombre_reports': float, 'montant_total': float, 'reg': str, 'libelle_region': str, 'code_section': str, 'libelle_section': str})
read_file.to_excel('../published-data/reports-echeances-regional-naf-latest.xlsx', index=None, header=True)

read_file = pd.read_csv("../published-data/reports-echeances-departemental-naf-latest.csv", dtype={'dispositif': str, 'nombre_reports': float, 'montant_total': float, 'reg': str, 'libelle_region': str, 'dep': str, 'libelle_departement': str, 'code_section': str, 'libelle_section': str})
read_file.to_excel('../published-data/reports-echeances-departemental-naf-latest.xlsx', index=None, header=True)
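The script above repeats the same read_csv -> to_excel conversion for two files. A hedged refactor sketch (same pandas calls, not from the original repo; the CONVERSIONS table name is invented) that drives both conversions from one mapping of file stems to dtype dictionaries:

import pandas as pd

# Hypothetical (file stem, dtype mapping) table; paths mirror the script above.
CONVERSIONS = {
    'reports-echeances-regional-naf-latest': {
        'dispositif': str, 'nombre_reports': float, 'montant_total': float,
        'reg': str, 'libelle_region': str,
        'code_section': str, 'libelle_section': str,
    },
    'reports-echeances-departemental-naf-latest': {
        'dispositif': str, 'nombre_reports': float, 'montant_total': float,
        'reg': str, 'libelle_region': str, 'dep': str, 'libelle_departement': str,
        'code_section': str, 'libelle_section': str,
    },
}

for stem, dtypes in CONVERSIONS.items():
    frame = pd.read_csv('../published-data/{}.csv'.format(stem), dtype=dtypes)
    # index=None suppresses the row index column, matching the original calls.
    frame.to_excel('../published-data/{}.xlsx'.format(stem), index=None, header=True)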
107
270
0.779706
107
749
5.280374
0.336449
0.088496
0.141593
0.20531
0.929204
0.929204
0.929204
0.741593
0.741593
0.578761
0
0
0.033378
749
7
271
107
0.780387
0
0
0
0
0
0.576
0.330667
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
1d5cb301076b03922296154f53f6bf57ad1eb3d1
17,218
py
Python
core/domain/feedback_jobs_one_off_test.py
kiranmayee-dobbali/oppia
135742d147eedba66f2bd3925ea037e614f7e0fe
[ "Apache-2.0" ]
1
2020-09-30T17:55:05.000Z
2020-09-30T17:55:05.000Z
core/domain/feedback_jobs_one_off_test.py
gitter-badger/oppia
7d8e659264582d7ce74bc6c139e597b82bca0e04
[ "Apache-2.0" ]
null
null
null
core/domain/feedback_jobs_one_off_test.py
gitter-badger/oppia
7d8e659264582d7ce74bc6c139e597b82bca0e04
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Feedback-related jobs.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain import taskqueue_services from core.platform import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) class FeedbackThreadCacheOneOffJobTest(test_utils.GenericTestBase): """Tests for one-off job to populate the caches of FeedbackThreads.""" def setUp(self): super(FeedbackThreadCacheOneOffJobTest, self).setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) def _run_one_off_job(self): """Runs the one-off job under test and returns its output.""" job_id = ( feedback_jobs_one_off.FeedbackThreadCacheOneOffJob.create_new()) feedback_jobs_one_off.FeedbackThreadCacheOneOffJob.enqueue(job_id) self.assertEqual( 1, self.count_jobs_in_mapreduce_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS)) self.process_and_flush_pending_mapreduce_tasks() self.assertEqual( 0, self.count_jobs_in_mapreduce_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS)) job_output = ( feedback_jobs_one_off.FeedbackThreadCacheOneOffJob.get_output( job_id)) job_output_pairs = [ast.literal_eval(o) for o in job_output] return [(key, int(val)) for key, val in job_output_pairs] def _create_thread(self, author_id, text): """Helper wrapper for feedback_services.create_thread which only exposes arguments relevant to the cache. Args: author_id: str|None. ID of the user which created this thread, or None if the author was anonymous (not logged in). text: str. Content of the first message in the thread (allowed to be an empty string). Returns: str. The ID of the newly created thread. """ return feedback_services.create_thread( 'exploration', 'exp_id', author_id, 'subject', text) def _create_message(self, thread_id, author_id, text): """Helper wrapper for feedback_services.create_message which only exposes arguments relevant to the cache. Args: thread_id: str. ID of the thread to which this message should be appened to. author_id: str|None. ID of the user which created this message, or None if the author was anonymous (not logged in). text: str. Content of the first message in the thread (allowed to be an empty string). 
""" feedback_services.create_message(thread_id, author_id, None, None, text) def test_cache_update_to_thread_with_1_message(self): thread_id = self._create_thread(self.editor_id, 'first text') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = None model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, 'first text') def test_cache_update_to_thread_with_2_messages(self): thread_id = self._create_thread(self.editor_id, 'first text') self._create_message(thread_id, self.editor_id, 'second text') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = None model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, 'second text') def test_cache_update_to_thread_with_1_empty_message(self): thread_id = self._create_thread(self.editor_id, '') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = 'Non-empty' model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, None) def test_cache_update_to_thread_with_2_empty_messages(self): thread_id = self._create_thread(self.editor_id, '') self._create_message(thread_id, self.editor_id, '') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = 'Non-empty' model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, None) def test_cache_update_to_thread_with_empty_then_nonempty_messages(self): thread_id = self._create_thread(self.editor_id, '') self._create_message(thread_id, self.editor_id, 'first text') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = None model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, 'first text') def test_cache_update_to_thread_with_nonempty_then_empty_messages(self): thread_id = self._create_thread(self.editor_id, 'first text') self._create_message(thread_id, self.editor_id, '') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_text = None model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_text, 'first text') def test_cache_update_to_thread_with_1_user_message(self): thread_id = self._create_thread(self.editor_id, 'first text') model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) model.last_nonempty_message_author_id = None model.put() self.assertEqual(self._run_one_off_job(), [('Updated', 1)]) model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) self.assertEqual(model.last_nonempty_message_author_id, self.editor_id) def test_cache_update_to_thread_with_2_user_messages(self): thread_id = self._create_thread(self.editor_id, 'first text') 
        self._create_message(thread_id, self.editor_id, 'second text')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = None
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_cache_update_to_thread_with_1_anon_message(self):
        thread_id = self._create_thread(None, 'first text')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = self.editor_id
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_cache_update_to_thread_with_2_anon_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, None, 'second text')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = self.editor_id
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_cache_update_to_thread_with_user_then_anon_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, None, 'second text')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = self.editor_id
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_cache_update_to_thread_with_anon_then_user_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, self.editor_id, 'second text')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = None
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_cache_update_to_thread_with_user_then_empty_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, None, '')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = None
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_cache_update_to_thread_with_anon_then_empty_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, self.editor_id, '')
        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        model.last_nonempty_message_author_id = self.editor_id
        model.put()

        self.assertEqual(self._run_one_off_job(), [('Updated', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_no_cache_update_to_thread_with_1_message(self):
        thread_id = self._create_thread(self.editor_id, 'first text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, 'first text')

    def test_no_cache_update_to_thread_with_2_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, self.editor_id, 'second text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, 'second text')

    def test_no_cache_update_to_thread_with_1_empty_message(self):
        thread_id = self._create_thread(self.editor_id, '')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, None)

    def test_no_cache_update_to_thread_with_2_empty_messages(self):
        thread_id = self._create_thread(self.editor_id, '')
        self._create_message(thread_id, self.editor_id, '')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, None)

    def test_no_cache_update_to_thread_with_empty_then_nonempty_messages(self):
        thread_id = self._create_thread(self.editor_id, '')
        self._create_message(thread_id, self.editor_id, 'first text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, 'first text')

    def test_no_cache_update_to_thread_with_nonempty_then_empty_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, self.editor_id, '')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_text, 'first text')

    def test_no_cache_update_to_thread_with_1_user_message(self):
        thread_id = self._create_thread(self.editor_id, 'first text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_no_cache_update_to_thread_with_2_user_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, self.editor_id, 'second text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_no_cache_update_to_thread_with_1_anon_message(self):
        thread_id = self._create_thread(None, 'first text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_no_cache_update_to_thread_with_2_anon_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, None, 'second text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_no_cache_update_to_thread_with_user_then_anon_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, None, 'second text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)

    def test_no_cache_update_to_thread_with_anon_then_user_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, self.editor_id, 'second text')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_no_cache_update_to_thread_with_user_then_empty_messages(self):
        thread_id = self._create_thread(self.editor_id, 'first text')
        self._create_message(thread_id, None, '')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertEqual(model.last_nonempty_message_author_id, self.editor_id)

    def test_no_cache_update_to_thread_with_anon_then_empty_messages(self):
        thread_id = self._create_thread(None, 'first text')
        self._create_message(thread_id, self.editor_id, '')

        self.assertEqual(self._run_one_off_job(), [('Already up-to-date', 1)])

        model = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
        self.assertIsNone(model.last_nonempty_message_author_id)
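The tests above all exercise one invariant: the thread model caches the author of its latest nonempty message, with None standing in for anonymous authors. A minimal standalone restatement of that invariant (illustrative only; this helper is not part of the Oppia codebase):

def expected_last_nonempty_author(messages):
    # messages: (author_id, text) tuples, oldest first; '' means an empty message.
    for author_id, text in reversed(messages):
        if text:
            return author_id
    return None

assert expected_last_nonempty_author([('editor', 'first text'), (None, '')]) == 'editor'
assert expected_last_nonempty_author([(None, 'first text')]) is None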
46.037433
80
0.731967
2,294
17,218
5.074106
0.081081
0.063918
0.072165
0.162371
0.866667
0.844845
0.844845
0.844845
0.844845
0.827234
0
0.00381
0.176908
17,218
373
81
46.160858
0.817528
0.09432
0
0.753138
0
0
0.053479
0
0
0
0
0
0.242678
1
0.133891
false
0
0.037657
0
0.1841
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
d52ef815943b21c268fbe4d47dd0bd7b46e1f4d5
8,960
py
Python
src/utils/user_agent.py
dTCTb/ReconT
f13537792aac4132d35ca3f7cf06db9af05195d3
[ "Apache-2.0" ]
15
2019-10-25T22:34:19.000Z
2022-03-10T14:44:03.000Z
src/utils/user_agent.py
dTCTb/ReconT
f13537792aac4132d35ca3f7cf06db9af05195d3
[ "Apache-2.0" ]
null
null
null
src/utils/user_agent.py
dTCTb/ReconT
f13537792aac4132d35ca3f7cf06db9af05195d3
[ "Apache-2.0" ]
11
2019-10-26T14:52:14.000Z
2022-03-29T15:44:50.000Z
# -*- coding: utf-8 -*-
import random

user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (X11; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 YaBrowser/18.11.1.805 Yowser/2.5 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    'Mozilla/5.0 (iPad; CPU OS 12_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1',
    'Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 YaBrowser/18.11.1.805 Yowser/2.5 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.98 Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.106',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0',
    'Mozilla/5.0 (Windows NT 6.1; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
    'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
    'Mozilla/5.0 (X11; CrOS x86_64 11151.59.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.94 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:64.0) Gecko/20100101 Firefox/64.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
    'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
    'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.22 (KHTML, like Gecko) Chrome/18.0.1025.133 Safari/535.22 Midori/0.4',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4 (Splashtop-v1.3.45.0)',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.34 (KHTML, like Gecko) konqueror/4.10.5 Safari/534.34',
    'Opera/9.80 (Linux mips ; U; InettvBrowser/2.2 (00014A;SonyDTV115;0002;0100) KDL55EX720; CC/USA; en) Presto/2.5.21 Version/10.30',
    'Mozilla/5.0 (Linux armv7l) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36 OPR/31.0.1890.0 OMI/4.6.1.40.Dominik2.193 VSTVB MB100 FVC/1.0 (LUXOR; MB110; ) HbbTV/1.3.1 (; LUXOR; MB110; 2.32.4.1; ;) SmartTvA/3.0.0',
    'Mozilla/5.0 (X11; Linux i686; rv:34.0; 9wnC4ytpyRZfcJm/x9JjlWs9epZMnsOWCeRi00sqfos=) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.2992.0 Safari/537.36',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.11) Gecko/20100721 Conkeror/0.9.2 (Debian-0.9.2+git100804-1)',
    'Mozilla/5.0 (X11; Linux x86_64) (KHTML, like Gecko) Version/11.0 Safari/604.1.38 QupZilla/1.8.9',
]


def useragent():
    return random.choice(user_agents)
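A brief usage sketch (not part of the original file): pairing useragent() with a stdlib HTTP request. The target URL is a placeholder.

import urllib.request

req = urllib.request.Request(
    'https://example.com/',
    headers={'User-Agent': useragent()},  # random UA drawn from the pool above
)
# response = urllib.request.urlopen(req)  # network call left commented out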
94.315789
240
0.708371
1,786
8,960
3.523516
0.091825
0.100429
0.112983
0.123471
0.870968
0.86477
0.86191
0.850787
0.838233
0.831877
0
0.259479
0.116964
8,960
94
241
95.319149
0.535895
0.002344
0
0
0
0.941176
0.93878
0.023391
0
0
0
0
0
1
0.011765
false
0
0.011765
0.011765
0.035294
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
63c6b104d5fdcb4acce31de0ac54929ea327d860
4,726
py
Python
services/ui/models/label.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
84
2017-10-22T11:01:39.000Z
2022-02-27T03:43:48.000Z
services/ui/models/label.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
22
2017-12-11T07:21:56.000Z
2021-09-23T02:53:50.000Z
services/ui/models/label.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
23
2017-12-06T06:59:52.000Z
2022-02-24T00:02:25.000Z
# ----------------------------------------------------------------------
# DefaultServiceItem
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------

# Python modules
from typing import Optional

# Third-party modules
from pydantic import BaseModel

# NOC modules
from .utils import Reference


class LabelItem(BaseModel):
    id: str
    name: str
    is_protected: bool
    scope: str
    value: str
    bg_color1: str
    fg_color1: str
    bg_color2: str
    fg_color2: str


class LabelLabelItem(BaseModel):
    id: str
    label: str
    is_protected: bool
    scope: str
    value: str
    bg_color1: str
    fg_color1: str
    bg_color2: str
    fg_color2: str


class DefaultLabelItem(BaseModel):
    id: str
    name: str
    description: Optional[str]
    bg_color1: Optional[int]
    fg_color1: Optional[int]
    bg_color2: Optional[int]
    fg_color2: Optional[int]
    is_protected: Optional[bool]
    is_autogenerated: Optional[bool]
    enable_agent: Optional[bool]
    enable_service: Optional[bool]
    enable_serviceprofile: Optional[bool]
    enable_managedobject: Optional[bool]
    enable_managedobjectprofile: Optional[bool]
    enable_administrativedomain: Optional[bool]
    enable_authprofile: Optional[bool]
    enable_commandsnippet: Optional[bool]
    enable_allocationgroup: Optional[bool]
    enable_networksegment: Optional[bool]
    enable_object: Optional[bool]
    enable_objectmodel: Optional[bool]
    enable_platform: Optional[bool]
    enable_resourcegroup: Optional[bool]
    enable_sensor: Optional[bool]
    enable_sensorprofile: Optional[bool]
    enable_subscriber: Optional[bool]
    enable_subscriberprofile: Optional[bool]
    enable_supplier: Optional[bool]
    enable_supplierprofile: Optional[bool]
    enable_dnszone: Optional[bool]
    enable_dnszonerecord: Optional[bool]
    enable_division: Optional[bool]
    enable_kbentry: Optional[bool]
    enable_ipaddress: Optional[bool]
    enable_addressprofile: Optional[bool]
    enable_ipaddressrange: Optional[bool]
    enable_ipprefix: Optional[bool]
    enable_prefixprofile: Optional[bool]
    enable_vrf: Optional[bool]
    enable_vrfgroup: Optional[bool]
    enable_asn: Optional[bool]
    enable_assetpeer: Optional[bool]
    enable_peer: Optional[bool]
    enable_vc: Optional[bool]
    enable_vlan: Optional[bool]
    enable_vlanprofile: Optional[bool]
    enable_vpn: Optional[bool]
    enable_vpnprofile: Optional[bool]
    enable_slaprobe: Optional[bool]
    enable_slaprofile: Optional[bool]
    enable_alarm: Optional[bool]
    expose_metric: Optional[bool]
    expose_datastream: Optional[bool]
    remote_system: Optional[Reference]
    remote_id: Optional[str]


class FormLabelItem(BaseModel):
    name: str
    description: Optional[str]
    bg_color1: Optional[int]
    fg_color1: Optional[int]
    bg_color2: Optional[int]
    fg_color2: Optional[int]
    is_protected: Optional[bool]
    enable_agent: Optional[bool]
    enable_service: Optional[bool]
    enable_serviceprofile: Optional[bool]
    enable_managedobject: Optional[bool]
    enable_managedobjectprofile: Optional[bool]
    enable_administrativedomain: Optional[bool]
    enable_authprofile: Optional[bool]
    enable_commandsnippet: Optional[bool]
    enable_allocationgroup: Optional[bool]
    enable_networksegment: Optional[bool]
    enable_object: Optional[bool]
    enable_objectmodel: Optional[bool]
    enable_platform: Optional[bool]
    enable_resourcegroup: Optional[bool]
    enable_sensor: Optional[bool]
    enable_sensorprofile: Optional[bool]
    enable_subscriber: Optional[bool]
    enable_subscriberprofile: Optional[bool]
    enable_supplier: Optional[bool]
    enable_supplierprofile: Optional[bool]
    enable_dnszone: Optional[bool]
    enable_dnszonerecord: Optional[bool]
    enable_division: Optional[bool]
    enable_kbentry: Optional[bool]
    enable_ipaddress: Optional[bool]
    enable_addressprofile: Optional[bool]
    enable_ipaddressrange: Optional[bool]
    enable_ipprefix: Optional[bool]
    enable_prefixprofile: Optional[bool]
    enable_vrf: Optional[bool]
    enable_vrfgroup: Optional[bool]
    enable_asn: Optional[bool]
    enable_assetpeer: Optional[bool]
    enable_peer: Optional[bool]
    enable_vc: Optional[bool]
    enable_vlan: Optional[bool]
    enable_vlanprofile: Optional[bool]
    enable_vpn: Optional[bool]
    enable_vpnprofile: Optional[bool]
    enable_slaprobe: Optional[bool]
    enable_slaprofile: Optional[bool]
    enable_alarm: Optional[bool]
    expose_metric: Optional[bool]
    expose_datastream: Optional[bool]
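A hedged instantiation sketch (values are illustrative; only the pydantic models defined above are assumed):

item = FormLabelItem(
    name='noc::example',
    description='Illustrative label',
    is_protected=False,
    enable_managedobject=True,
)
print(item.dict(exclude_none=True))  # pydantic v1 serialization of the set fields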
31.092105
72
0.710114
505
4,726
6.425743
0.184158
0.336518
0.465948
0.011094
0.896148
0.886287
0.886287
0.886287
0.886287
0.886287
0
0.006119
0.170123
4,726
151
73
31.298013
0.821265
0.072366
0
0.916031
0
0
0
0
0
0
0
0
0
1
0
true
0
0.022901
0
1
0
0
0
0
null
1
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
11
89471065c8caf019a4c3772ca1a6c1b4e2d46f29
9,911
py
Python
GrasslandModels/models/choler2010.py
sdtaylor/pyPhenoGrass
6e1bdff439db41f0b8ff5f4ffcea276fe13ccdc2
[ "MIT" ]
null
null
null
GrasslandModels/models/choler2010.py
sdtaylor/pyPhenoGrass
6e1bdff439db41f0b8ff5f4ffcea276fe13ccdc2
[ "MIT" ]
30
2019-11-27T17:22:23.000Z
2021-09-10T13:09:33.000Z
GrasslandModels/models/choler2010.py
sdtaylor/pyPhenoGrass
6e1bdff439db41f0b8ff5f4ffcea276fe13ccdc2
[ "MIT" ]
null
null
null
from . import utils
from .base import BaseModel
import numpy as np


class CholerM1(BaseModel):
    """
    The "M1" four parameter model described in Choler et al. 2010
    """

    def __init__(self, parameters={}):
        BaseModel.__init__(self)
        self.all_required_parameters = {'a1': (0, 100), 'a2': (0, 100),
                                        'a3': (0, 100), 'L': (1, 30)}
        self._organize_parameters(parameters)
        self._required_predictors = {'precip': 'per_timestep',
                                     'evap': 'per_timestep',
                                     'Wcap': 'per_site'}
        self.state_variables = ['V', 'W', 'Dt']
        self.set_internal_method(method='numpy')

    def set_internal_method(self, method='numpy'):
        if method == 'cython':
            raise NotImplementedError('cython method not implemented for this model')
        elif method == 'numpy':
            self._apply_model = self._apply_model_numpy
        else:
            raise ValueError('Unknown internal method: ' + method)

    def _apply_model_numpy(self,
                           # Site specific drivers
                           precip,  # precip, Daily vector
                           evap,    # potential ET, Daily vector
                           Wcap,    # field capacity, single value/site

                           # Model parameters
                           a1, a2, a3, L,

                           # Constraints on vegetation.
                           Vmin=0.001,  # Needs to be a small non-zero value
                           Vmax=1.,     # 100%, since GCC is scaled 0-1
                           # Note: in the original Choler 2010 paper, Vmax is a site
                           # specific value set to the maximum value observed at a site.
                           # This is not feasible for extrapolation though.

                           # Initial conditions
                           W_initial=0,
                           Wstart=0,
                           V_initial=0.001,

                           # Normally just V (vegetation cover) should be returned,
                           # but for diagnostics use 'all' to get V, W, and Dt
                           return_vars='V'):
        """ """
        L = int(L)  # must be a whole number; any floats will be truncated.

        # Initialize everything
        # Primary state variables
        W = np.empty_like(precip).astype('float32')
        W[:] = W_initial
        V = np.empty_like(precip).astype('float32')
        V[:] = V_initial

        # Derived variables
        Dt = np.zeros_like(precip).astype('float32')

        # Site level vars such as lagged plant-water and temp responses.
        # In the Choler 2010 paper this term is We; it is named Dtl here
        # to match the other models.
        Dtl = np.empty_like(Wcap)
        Dtl1 = np.empty_like(Wcap)

        n_timesteps = precip.shape[0] - 1

        for i in range(1, n_timesteps):
            # If we are near the start of the timeseries then initialize
            # soil/plant water to something reasonable.
            # Condition (iii)
            if i - L - 1 < 0:
                Dt[i] = np.maximum(0, W[i] - a3)
                Dtl[:] = Wstart
                Dtl1[:] = Wstart
            else:
                Dt[i] = np.maximum(0, W[i] - a3)
                Dtl = Dt[i - L]
                Dtl1 = Dt[i - L - 1]

            # Soil water
            W[i + 1] = W[i] + precip[i] - a1 * (W[i] / Wcap) * evap[i]

            # Condition (ii)
            W[i + 1] = np.maximum(0, np.minimum(Wcap, W[i + 1]))

            # Primary veg growth equation
            V[i + 1] = V[i] + a2 * (Dtl - Dtl1)

            # Condition (iv)
            # Constrain veg to 0-1
            V[i + 1] = np.maximum(Vmin, np.minimum(Vmax, V[i + 1]))

        if return_vars == 'V':
            return V
        elif return_vars == 'all':
            return {'V': V, 'W': W, 'Dt': Dt}


class CholerM1A(CholerM1):
    """
    The "M1A" model described in Choler et al. 2010
    This fixes the a3 parameter to 0
    """

    def __init__(self, parameters={}):
        CholerM1.__init__(self)
        self.all_required_parameters = {'a1': (0, 100), 'a2': (0, 100),
                                        'a3': 0, 'L': (1, 30)}
        self._organize_parameters(parameters)


class CholerM1B(CholerM1):
    """
    The "M1B" model described in Choler et al. 2010
    Parameterizing the full model, essentially. Described here for completeness.
    """

    def __init__(self, parameters={}):
        CholerM1.__init__(self)
        self._organize_parameters(parameters)


class CholerM2(BaseModel):
    """
    The "M2" model described in Choler et al. 2010
    """

    def __init__(self, parameters={}):
        BaseModel.__init__(self)
        self.all_required_parameters = {'b1': (0, 100), 'b2': (0, 100),
                                        'b3': (0, 100), 'b4': (0, 100),
                                        'b5': (0, 100)}
        self._organize_parameters(parameters)
        self._required_predictors = {'precip': 'per_timestep',
                                     'evap': 'per_timestep',
                                     'Wcap': 'per_site'}
        self.state_variables = ['V', 'W', 'Dt']
        self.set_internal_method(method='numpy')

    def set_internal_method(self, method='numpy'):
        if method == 'cython':
            raise NotImplementedError('cython method not implemented for this model')
        elif method == 'numpy':
            self._apply_model = self._apply_model_numpy
        else:
            raise ValueError('Unknown internal method: ' + method)

    def _apply_model_numpy(self,
                           # Site specific drivers
                           precip,  # precip, Daily vector
                           evap,    # potential ET, Daily vector
                           Wcap,    # field capacity, single value/site

                           # Model parameters
                           b1, b2, b3, b4, b5,

                           # Constraints on vegetation.
                           Vmin=0.001,  # Needs to be a small non-zero value
                           Vmax=1.,     # 100%, since GCC is scaled 0-1
                           # Note: in the original Choler 2010 paper, Vmax is a site
                           # specific value set to the maximum value observed at a site.
                           # This is not feasible for extrapolation though.

                           # Initial conditions
                           W_initial=0,
                           Wstart=0,
                           V_initial=0.001,

                           # Normally just V (vegetation cover) should be returned,
                           # but for diagnostics use 'all' to get V, W, and Dt
                           return_vars='V'):
        """ """
        # Initialize everything
        # Primary state variables
        W = np.empty_like(precip).astype('float32')
        W[:] = W_initial
        V = np.empty_like(precip).astype('float32')
        V[:] = V_initial

        # Derived variables
        Dt = np.zeros_like(precip).astype('float32')

        n_timesteps = precip.shape[0] - 1

        for i in range(1, n_timesteps):
            # Plant available water
            # Condition (iii)
            Dt[i] = np.maximum(0, W[i] - b5)

            # Soil water
            W[i + 1] = W[i] + precip[i] - b1 * (1 - V[i]) * (W[i] / Wcap) * evap[i] - b2 * V[i] * Dt[i]

            # Condition (ii)
            W[i + 1] = np.maximum(0, np.minimum(Wcap, W[i + 1]))

            # Primary veg growth equation
            V[i + 1] = b3 * (Dt[i] / (Wcap - b5)) * V[i] * (1 - (V[i] / Vmax)) - (b4 * V[i])

            # Condition (iv)
            # Constrain veg to 0-1
            V[i + 1] = np.maximum(Vmin, np.minimum(Vmax, V[i + 1]))

        if return_vars == 'V':
            return V
        elif return_vars == 'all':
            return {'V': V, 'W': W, 'Dt': Dt}


class CholerM2A(CholerM2):
    """
    The "M2A" model described in Choler et al. 2010
    Here the b5 parameter is fixed at 0, essentially making plant available
    water equal to total soil water.
    """

    def __init__(self, parameters={}):
        CholerM2.__init__(self)
        self.all_required_parameters = {'b1': (0, 100), 'b2': (0, 100),
                                        'b3': (0, 100), 'b4': (0, 100),
                                        'b5': 0}
        self._organize_parameters(parameters)


class CholerM2B(CholerM2):
    """
    The "M2B" model described in Choler et al. 2010
    Parameterizing the full model, essentially. Described here for completeness.
    """

    def __init__(self, parameters={}):
        CholerM2.__init__(self)
        self._organize_parameters(parameters)
38.564202
103
0.439613
995
9,911
4.248241
0.20201
0.013248
0.004968
0.031228
0.821859
0.804116
0.790868
0.763662
0.740478
0.721079
0
0.044787
0.463828
9,911
256
104
38.714844
0.750659
0.233982
0
0.742647
0
0
0.050595
0
0
0
0
0
0
1
0.073529
false
0
0.022059
0
0.169118
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
896547ff9c2c3d2181f0ce2140b404559bae744f
33,171
py
Python
exact_sync/v1/api/annotations_api.py
maubreville/EXACT-Sync
47a47e5af360292677601a877e0765d5e01bd2df
[ "MIT" ]
null
null
null
exact_sync/v1/api/annotations_api.py
maubreville/EXACT-Sync
47a47e5af360292677601a877e0765d5e01bd2df
[ "MIT" ]
null
null
null
exact_sync/v1/api/annotations_api.py
maubreville/EXACT-Sync
47a47e5af360292677601a877e0765d5e01bd2df
[ "MIT" ]
null
null
null
# coding: utf-8

"""
    EXACT - API

    API to interact with the EXACT Server  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

from exact_sync.v1.api.pagination_base_api import PaginationBaseAPI

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from exact_sync.v1.api_client import ApiClient


class AnnotationsApi(PaginationBaseAPI):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_annotation(self, **kwargs):  # noqa: E501
        """create_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_annotation(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Annotation body:
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_annotation_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.create_annotation_with_http_info(**kwargs)  # noqa: E501
            return data

    def create_annotation_with_http_info(self, **kwargs):  # noqa: E501
        """create_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_annotation_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Annotation body:
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_annotation" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = {}
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Annotation',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def destroy_annotation(self, id, **kwargs):  # noqa: E501
        """destroy_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.destroy_annotation(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.destroy_annotation_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.destroy_annotation_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def destroy_annotation_with_http_info(self, id, **kwargs):  # noqa: E501
        """destroy_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.destroy_annotation_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method destroy_annotation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `destroy_annotation`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/{id}/', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def list_annotations(self, pagination: bool = True, **kwargs):  # noqa: E501
        """list_annotations  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_annotations(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: Number of results to return per page.
        :param int offset: The initial index from which to return the results.
        :param str id: id
        :param str time: time
        :param str time__lte: time__lte
        :param str time__gte: time__gte
        :param str time__range: time__range
        :param str unique_identifier: unique_identifier
        :param str unique_identifier__contains: unique_identifier__contains
        :param str description: description
        :param str description__contains: description__contains
        :param str deleted: deleted
        :param str image: image
        :param str user: user
        :param str annotation_type: annotation_type
        :param str verified_by: verified_by
        :param str verified_by__range: verified_by__range
        :param str vector_x: Vector-X-Range
        :param str vector_y: Vector-Y-Range
        :param bool meta_data__isnull: Meta data is null
        :param bool vector__isnull: Vector is null
        :return: Annotations
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if pagination:
            if kwargs.get('async_req'):
                return self.list_annotations_with_http_info(**kwargs)  # noqa: E501
            else:
                (data) = self.list_annotations_with_http_info(**kwargs)  # noqa: E501
                return data
        else:
            return self._get_all(self.list_annotations_with_http_info, **kwargs)

    def list_annotations_with_http_info(self, **kwargs):  # noqa: E501
        """list_annotations  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_annotations_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: Number of results to return per page.
        :param int offset: The initial index from which to return the results.
        :param str id: id
        :param str time: time
        :param str time__lte: time__lte
        :param str time__gte: time__gte
        :param str time__range: time__range
        :param str unique_identifier: unique_identifier
        :param str unique_identifier__contains: unique_identifier__contains
        :param str description: description
        :param str description__contains: description__contains
        :param str deleted: deleted
        :param str image: image
        :param str user: user
        :param str annotation_type: annotation_type
        :param str verified_by: verified_by
        :param str verified_by__range: verified_by__range
        :param str vector_x: Vector-X-Range
        :param str vector_y: Vector-Y-Range
        :param bool meta_data__isnull: Meta data is null
        :param bool vector__isnull: Vector is null
        :return: Annotations
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['limit', 'offset', 'id', 'time', 'time__lte', 'time__gte', 'time__range', 'unique_identifier', 'unique_identifier__contains', 'description', 'description__contains', 'deleted', 'image', 'user', 'annotation_type', 'verified_by', 'verified_by__range', 'vector_x', 'vector_y', 'meta_data__isnull', 'vector__isnull']  # noqa: E501
        all_params.append('omit')
        all_params.append('fields')
        all_params.append('expand')
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_annotations" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'offset' in params:
            query_params.append(('offset', params['offset']))  # noqa: E501
        if 'id' in params:
            query_params.append(('id', params['id']))  # noqa: E501
        if 'time' in params:
            query_params.append(('time', params['time']))  # noqa: E501
        if 'time__lte' in params:
            query_params.append(('time__lte', params['time__lte']))  # noqa: E501
        if 'time__gte' in params:
            query_params.append(('time__gte', params['time__gte']))  # noqa: E501
        if 'time__range' in params:
            query_params.append(('time__range', params['time__range']))  # noqa: E501
        if 'unique_identifier' in params:
            query_params.append(('unique_identifier', params['unique_identifier']))  # noqa: E501
        if 'unique_identifier__contains' in params:
            query_params.append(('unique_identifier__contains', params['unique_identifier__contains']))  # noqa: E501
        if 'description' in params:
            query_params.append(('description', params['description']))  # noqa: E501
        if 'description__contains' in params:
            query_params.append(('description__contains', params['description__contains']))  # noqa: E501
        if 'deleted' in params:
            query_params.append(('deleted', params['deleted']))  # noqa: E501
        if 'image' in params:
            query_params.append(('image', params['image']))  # noqa: E501
        if 'user' in params:
            query_params.append(('user', params['user']))  # noqa: E501
        if 'annotation_type' in params:
            query_params.append(('annotation_type', params['annotation_type']))  # noqa: E501
        if 'verified_by' in params:
            query_params.append(('verified_by', params['verified_by']))  # noqa: E501
        if 'verified_by__range' in params:
            query_params.append(('verified_by__range', params['verified_by__range']))  # noqa: E501
        if 'vector_x' in params:
            query_params.append(('vector_x', params['vector_x']))  # noqa: E501
        if 'vector_y' in params:
            query_params.append(('vector_y', params['vector_y']))  # noqa: E501
        if 'meta_data__isnull' in params:
            query_params.append(('meta_data__isnull', params['meta_data__isnull']))  # noqa: E501
        if 'vector__isnull' in params:
            query_params.append(('vector__isnull', params['vector__isnull']))  # noqa: E501
        if 'omit' in params:
            query_params.append(('omit', params['omit']))  # noqa: E501
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Annotations',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def partial_update_annotation(self, id, **kwargs):  # noqa: E501
        """partial_update_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.partial_update_annotation(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param int annotation_type:
        :param object vector:
        :param int image:
        :param int last_editor:
        :param int user:
        :param bool deleted:
        :param str description:
        :param str unique_identifier:
        :param list[int] uploaded_media_files:
        :param object meta_data:
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.partial_update_annotation_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.partial_update_annotation_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def partial_update_annotation_with_http_info(self, id, **kwargs):  # noqa: E501
        """partial_update_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.partial_update_annotation_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int annotation_type:
        :param object vector:
        :param int image:
        :param int last_editor:
        :param int user:
        :param bool deleted:
        :param str description:
        :param str unique_identifier:
        :param list[int] uploaded_media_files:
        :param object meta_data:
        :param last_edit_time
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id', 'annotation_type', 'last_edit_time', 'vector', 'image', 'last_editor', 'user', 'deleted', 'description', 'unique_identifier', 'uploaded_media_files', 'meta_data']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method partial_update_annotation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `partial_update_annotation`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = {}
        if 'annotation_type' in params:
            body_params['annotation_type'] = params['annotation_type']
        if 'vector' in params:
            body_params['vector'] = params['vector']
        if 'image' in params:
            body_params['image'] = params['image']
        if 'last_editor' in params:
            body_params['last_editor'] = params['last_editor']
        if 'user' in params:
            body_params['user'] = params['user']
        if 'deleted' in params:
            body_params['deleted'] = params['deleted']
        if 'description' in params:
            body_params['description'] = params['description']
        if 'unique_identifier' in params:
            body_params['unique_identifier'] = params['unique_identifier']
        if 'uploaded_media_files' in params:
            body_params['uploaded_media_files'] = params['uploaded_media_files']
        if 'meta_data' in params:
            body_params['meta_data'] = params['meta_data']
        if 'last_edit_time' in params:
            body_params['last_edit_time'] = params['last_edit_time']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/{id}/', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Annotation',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def retrieve_annotation(self, id, **kwargs):  # noqa: E501
        """retrieve_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.retrieve_annotation(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param str id2: id
        :param str time: time
        :param str time__lte: time__lte
        :param str time__gte: time__gte
        :param str time__range: time__range
        :param str unique_identifier: unique_identifier
        :param str unique_identifier__contains: unique_identifier__contains
        :param str description: description
        :param str description__contains: description__contains
        :param str deleted: deleted
        :param str image: image
        :param str user: user
        :param str annotation_type: annotation_type
        :param str verified_by: verified_by
        :param str verified_by__range: verified_by__range
        :param str vector_x: Vector-X-Range
        :param str vector_y: Vector-Y-Range
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.retrieve_annotation_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.retrieve_annotation_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def retrieve_annotation_with_http_info(self, id, **kwargs):  # noqa: E501
        """retrieve_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.retrieve_annotation_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param str id2: id
        :param str time: time
        :param str time__lte: time__lte
        :param str time__gte: time__gte
        :param str time__range: time__range
        :param str unique_identifier: unique_identifier
        :param str unique_identifier__contains: unique_identifier__contains
        :param str description: description
        :param str description__contains: description__contains
        :param str deleted: deleted
        :param str image: image
        :param str user: user
        :param str annotation_type: annotation_type
        :param str verified_by: verified_by
        :param str verified_by__range: verified_by__range
        :param str vector_x: Vector-X-Range
        :param str vector_y: Vector-Y-Range
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id', 'id2', 'time', 'time__lte', 'time__gte', 'time__range', 'unique_identifier', 'unique_identifier__contains', 'description', 'description__contains', 'deleted', 'image', 'user', 'annotation_type', 'verified_by', 'verified_by__range', 'vector_x', 'vector_y']  # noqa: E501
        all_params.append('omit')
        all_params.append('fields')
        all_params.append('expand')
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method retrieve_annotation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `retrieve_annotation`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []
        if 'id2' in params:
            query_params.append(('id', params['id2']))  # noqa: E501
        if 'time' in params:
            query_params.append(('time', params['time']))  # noqa: E501
        if 'time__lte' in params:
            query_params.append(('time__lte', params['time__lte']))  # noqa: E501
        if 'time__gte' in params:
            query_params.append(('time__gte', params['time__gte']))  # noqa: E501
        if 'time__range' in params:
            query_params.append(('time__range', params['time__range']))  # noqa: E501
        if 'unique_identifier' in params:
            query_params.append(('unique_identifier', params['unique_identifier']))  # noqa: E501
        if 'unique_identifier__contains' in params:
            query_params.append(('unique_identifier__contains', params['unique_identifier__contains']))  # noqa: E501
        if 'description' in params:
            query_params.append(('description', params['description']))  # noqa: E501
        if 'description__contains' in params:
            query_params.append(('description__contains', params['description__contains']))  # noqa: E501
        if 'deleted' in params:
            query_params.append(('deleted', params['deleted']))  # noqa: E501
        if 'image' in params:
            query_params.append(('image', params['image']))  # noqa: E501
        if 'user' in params:
            query_params.append(('user', params['user']))  # noqa: E501
        if 'annotation_type' in params:
            query_params.append(('annotation_type', params['annotation_type']))  # noqa: E501
        if 'verified_by' in params:
            query_params.append(('verified_by', params['verified_by']))  # noqa: E501
        if 'verified_by__range' in params:
            query_params.append(('verified_by__range', params['verified_by__range']))  # noqa: E501
        if 'vector_x' in params:
            query_params.append(('vector_x', params['vector_x']))  # noqa: E501
        if 'vector_y' in params:
            query_params.append(('vector_y', params['vector_y']))  # noqa: E501
        if 'omit' in params:
            query_params.append(('omit', params['omit']))  # noqa: E501
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/{id}/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Annotation',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_annotation(self, id, **kwargs):  # noqa: E501
        """update_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_annotation(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param Annotation body:
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_annotation_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.update_annotation_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def update_annotation_with_http_info(self, id, **kwargs):  # noqa: E501
        """update_annotation  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_annotation_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param Annotation body:
        :return: Annotation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_annotation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `update_annotation`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/annotations/annotations/{id}/', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Annotation',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
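A hedged usage sketch of the generated client (host and basicAuth wiring are omitted; only ApiClient and AnnotationsApi from this file are assumed, and the call performs a real HTTP request against whatever host the client is configured for):

client = ApiClient()  # real use configures the server host and credentials
api = AnnotationsApi(client)

# First page of annotations for one image; filter names follow the
# docstrings above, and the image id is a placeholder.
page = api.list_annotations(image='1', limit=50)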
41.104089
348
0.610563
3,790
33,171
5.06781
0.05277
0.048316
0.046025
0.043526
0.92534
0.90946
0.904358
0.890613
0.889051
0.87666
0
0.015878
0.289892
33,171
806
349
41.155087
0.799533
0.308311
0
0.788155
1
0
0.226062
0.055479
0
0
0
0
0
1
0.029613
false
0
0.01139
0
0.08656
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
987169fa0ac1ca2c382ee051da2c562cbba4e931
199
py
Python
api-reconstruction/analysis/__init__.py
eurecom-s3/syscall2api
2f2c72c759b0fd803fe1302c3b6717cda1906916
[ "MIT" ]
10
2019-09-24T13:36:15.000Z
2021-11-01T02:40:10.000Z
api-reconstruction/analysis/__init__.py
eurecom-s3/syscall2api
2f2c72c759b0fd803fe1302c3b6717cda1906916
[ "MIT" ]
2
2020-10-19T11:51:08.000Z
2021-04-17T01:08:23.000Z
api-reconstruction/analysis/__init__.py
eurecom-s3/syscall2api
2f2c72c759b0fd803fe1302c3b6717cda1906916
[ "MIT" ]
null
null
null
from .analysis_internals import *
from .classes import *
from .utils import *
from .generic_models import *
from .less_generic_models import *
from .decorators import *
from .trace_analysis import *
24.875
34
0.788945
26
199
5.846154
0.423077
0.394737
0.25
0.302632
0
0
0
0
0
0
0
0
0.140704
199
7
35
28.428571
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
98751f83e02ca52c7b9a95c339613cdf69ad5d4d
3,730
py
Python
tests/test_cards/test_actions/test_sentry.py
evanofslack/pyminion
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
[ "MIT" ]
5
2021-12-17T20:34:55.000Z
2022-01-24T15:18:05.000Z
tests/test_cards/test_actions/test_sentry.py
evanofslack/pyminion
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
[ "MIT" ]
31
2021-10-29T21:05:00.000Z
2022-03-22T03:27:14.000Z
tests/test_cards/test_actions/test_sentry.py
evanofslack/pyminion
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
[ "MIT" ]
1
2021-12-23T18:32:47.000Z
2021-12-23T18:32:47.000Z
from pyminion.expansions.base import copper, estate, gold, sentry
from pyminion.game import Game
from pyminion.players import Human


def test_sentry_no_reorder(human: Human, game: Game, monkeypatch):
    human.deck.cards = []
    human.deck.add(gold)
    human.deck.add(copper)
    human.deck.add(copper)
    human.hand.add(sentry)
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Gold"

    responses = iter(["", "", "no"])
    monkeypatch.setattr("builtins.input", lambda input: next(responses))
    human.play(sentry, game)
    assert len(human.hand) == 1
    assert len(human.playmat) == 1
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.state.actions == 1
    assert len(human.deck) == 2
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Gold"


def test_sentry_yes_reorder(human: Human, game: Game, monkeypatch):
    human.deck.cards = []
    human.deck.add(gold)
    human.deck.add(copper)
    human.deck.add(copper)
    human.hand.add(sentry)
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Gold"

    responses = iter(["", "", "yes"])
    monkeypatch.setattr("builtins.input", lambda input: next(responses))
    human.play(sentry, game)
    assert len(human.hand) == 1
    assert len(human.playmat) == 1
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.state.actions == 1
    assert len(human.deck) == 2
    assert human.deck.cards[0].name == "Copper"
    assert human.deck.cards[1].name == "Gold"


def test_sentry_trash_two(human: Human, game: Game, monkeypatch):
    human.deck.cards = []
    human.deck.add(estate)
    human.deck.add(copper)
    human.deck.add(copper)
    human.hand.add(sentry)
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Estate"

    responses = iter(["copper, estate"])
    monkeypatch.setattr("builtins.input", lambda input: next(responses))
    human.play(sentry, game)
    assert len(human.hand) == 1
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 2
    assert len(human.deck) == 0


def test_sentry_discard_two(human: Human, game: Game, monkeypatch):
    human.deck.cards = []
    human.deck.add(estate)
    human.deck.add(copper)
    human.deck.add(copper)
    human.hand.add(sentry)
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Estate"

    responses = iter(["", "copper, estate"])
    monkeypatch.setattr("builtins.input", lambda input: next(responses))
    human.play(sentry, game)
    assert len(human.hand) == 1
    assert len(human.discard_pile) == 2
    assert len(game.trash) == 0
    assert len(human.deck) == 0


def test_sentry_trash_one_discard_one(human: Human, game: Game, monkeypatch):
    human.deck.cards = []
    human.deck.add(estate)
    human.deck.add(copper)
    human.deck.add(copper)
    human.hand.add(sentry)
    assert len(human.discard_pile) == 0
    assert len(game.trash) == 0
    assert human.deck.cards[1].name == "Copper"
    assert human.deck.cards[0].name == "Estate"

    responses = iter(["copper", "estate"])
    monkeypatch.setattr("builtins.input", lambda input: next(responses))
    human.play(sentry, game)
    assert len(human.hand) == 1
    assert len(human.discard_pile) == 1
    assert len(game.trash) == 1
    assert len(human.deck) == 0
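All five tests stub interactive prompts the same way: builtins.input is monkeypatched to consume a fixed iterator of answers, one per expected prompt. The pattern in isolation (illustrative, outside pytest):

responses = iter(["", "", "no"])                  # one entry per expected prompt
fake_input = lambda prompt: next(responses)
assert fake_input("Trash any cards? ") == ""      # first prompt -> skip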
30.57377
77
0.660054
522
3,730
4.664751
0.084291
0.144148
0.126489
0.11499
0.924435
0.903901
0.881314
0.881314
0.855031
0.855031
0
0.015852
0.188204
3,730
121
78
30.826446
0.788309
0
0
0.802083
0
0
0.051206
0
0
0
0
0
0.5
1
0.052083
false
0
0.03125
0
0.083333
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
8
98976e1ff875b27a2fadf36bccb958474f8deb67
35,362
py
Python
test/test_shapes.py
OxDuke/pyfcl
b6deb0fac90791b5b8765de1cf863656a7908407
[ "MIT" ]
null
null
null
test/test_shapes.py
OxDuke/pyfcl
b6deb0fac90791b5b8765de1cf863656a7908407
[ "MIT" ]
null
null
null
test/test_shapes.py
OxDuke/pyfcl
b6deb0fac90791b5b8765de1cf863656a7908407
[ "MIT" ]
null
null
null
""" @AUTHOR: Weidong Sun @EMAIL: swdswd28@foxmail.com """ from __future__ import print_function, division import numpy as np import unittest import fcl from common_utils import double_float_difference def nonzero_rand(*args): return np.random.rand(*args) + 0.05 def test_shape_self_collide(shape1, shape2, tf1, tf2, is_in_collision): co1 = fcl.CollisionObject(shape1, tf1) co2 = fcl.CollisionObject(shape2, tf2) req = fcl.CollisionRequest() res = fcl.CollisionResult() ret = fcl.collide(co1, co2, req, res) assert res.is_collision == is_in_collision def test_shape_self_collide_single_contact(shape1, shape2, tf1, tf2, normal, pos, penetration_depth, normal_atol=0, pos_atol=0, depth_atol=0): co1 = fcl.CollisionObject(shape1, tf1) co2 = fcl.CollisionObject(shape2, tf2) req = fcl.CollisionRequest(num_max_contacts=1, enable_contact=True) res = fcl.CollisionResult() ret = fcl.collide(co1, co2, req, res) # Two objects should be in collision assert res.is_collision == True # Single contact # print(len(res.contacts)) # for cot in res.contacts: # print('-----------------') # print(cot.normal) # print(cot.pos) # print(cot.penetration_depth) assert len(res.contacts) == 1 contact = res.contacts[0] np.testing.assert_allclose(contact.normal, normal, rtol=0, atol=normal_atol + double_float_difference) np.testing.assert_allclose(contact.pos, pos, rtol=0, atol=pos_atol + double_float_difference) np.testing.assert_allclose(contact.penetration_depth, penetration_depth, rtol=0, atol=depth_atol + double_float_difference) def test_shape_self_distance(shape1, shape2, tf1, tf2, expected_distance, atol=0): co1 = fcl.CollisionObject(shape1, tf1) co2 = fcl.CollisionObject(shape2, tf2) req = fcl.DistanceRequest() res = fcl.DistanceResult() ret = fcl.distance(co1, co2, req, res) np.testing.assert_allclose(res.min_distance, expected_distance, rtol=0, atol=atol + double_float_difference) class TestTriangleP(unittest.TestCase): def test_properties(self): random_vertices = [np.random.rand(3) for i in range(3)] tri = fcl.TriangleP(*random_vertices) np.testing.assert_allclose(tri.a, random_vertices[0], rtol=0, atol=0 + double_float_difference) np.testing.assert_allclose(tri.b, random_vertices[1], rtol=0, atol=0 + double_float_difference) np.testing.assert_allclose(tri.c, random_vertices[2], rtol=0, atol=0 + double_float_difference) random_vertices = [np.random.rand(3) for i in range(3)] tri.a, tri.b, tri.c = random_vertices np.testing.assert_allclose(tri.a, random_vertices[0], rtol=0, atol=0 + double_float_difference) np.testing.assert_allclose(tri.b, random_vertices[1], rtol=0, atol=0 + double_float_difference) np.testing.assert_allclose(tri.c, random_vertices[2], rtol=0, atol=0 + double_float_difference) def test_self_collide(self): # t1 = fcl.TriangleP(*np.array([[0,0,0],[1,0,0],[0,1,0]])) # t2 = fcl.TriangleP(*np.array([[0,1,0],[-1,0,0],[1,0,0]])) # test_shape_self_collide(t1, t2, # fcl.Transform(np.array([1,0,0,0]), np.array([0,0,0])), # fcl.Transform(np.array([1,0,0,0]), np.array([-0.999,0,0])), # False) # test_shape_self_collide(t1, t2, # fcl.Transform(np.array([1,0,0,0]), np.array([0,0,0])), # fcl.Transform(np.array([1,0,0,0]), np.array([-1.001,0,0])), # False) # @TODO: need tests here pass def test_self_distance(self): # @TODO: need tests here pass class TestBox(unittest.TestCase): def test_properties(self): random_sides = np.random.rand(3) box = fcl.Box(*random_sides) np.testing.assert_allclose(box.side, random_sides, rtol=0, atol=0 + double_float_difference) random_sides = np.random.rand(3) box.side = random_sides 
np.testing.assert_allclose(box.side, random_sides, rtol=0, atol=0 + double_float_difference) self.assertTrue(box.getNodeType() == 9) # @TODO: test aabb's center def test_self_collide(self): # Seperate on X axis test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0.999, 0, 0])), True) test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.001, 0, 0])), False) # Seperate on Y axis test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 1.499, 0])), True) test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 1.501, 0])), False) # Seperate on Z axis test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 1.999])), True) test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 2.001])), False) # Rotate box1 around Z axis for 90 degrees # Note that the quaternion is in [w,x,y,z] order. rotate_around_z_90_degrees = np.array([0.70710678, 0., 0., 0.70710678]) test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.499, 0, 0])), fcl.Transform(rotate_around_z_90_degrees, np.array([0, 0, 0])), True) test_shape_self_collide( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.501, 0, 0])), fcl.Transform(rotate_around_z_90_degrees, np.array([0, 0, 0])), False) def test_self_collide_with_contact(self): # Contact on X axis test_shape_self_collide_single_contact( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0.8, 0, 0])), normal=[1, 0, 0], pos=[0.4, 0.5, 0.5], penetration_depth=0.2, depth_atol=1e-14) def test_self_distance(self): # Seperation on X, Y, Z axis test_shape_self_distance( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([2, 0, 0])), 1.0) test_shape_self_distance( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 3, 0])), 1.5) test_shape_self_distance( fcl.Box(1, 1, 1), fcl.Box(1, 2, 3), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])), fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 3])), 1.0) # If two boxes collide, distance() should return -1. 
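        # (Hedged note) For overlapping box pairs FCL's distance query appears
        # to give up and report the sentinel value -1 rather than a signed
        # penetration depth; the check below relies on that convention.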
        test_shape_self_distance(
            fcl.Box(1, 1, 1), fcl.Box(1, 2, 3),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1, 0, 0])),
            -1.0)


class TestSphere(unittest.TestCase):
    def test_properties(self):
        random_radius = np.random.rand()
        sphere = fcl.Sphere(random_radius)
        np.testing.assert_allclose(sphere.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)

        random_radius = np.random.rand()
        sphere.radius = random_radius
        np.testing.assert_allclose(sphere.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        self.assertTrue(sphere.getNodeType() == 10)

    def test_self_collide(self):
        test_shape_self_collide(
            fcl.Sphere(1), fcl.Sphere(2),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2.999, 0, 0])),
            True)
        test_shape_self_collide(
            fcl.Sphere(1), fcl.Sphere(2),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([3.001, 0, 0])),
            False)

    def test_self_collide_with_contact(self):
        # Contact on X axis
        test_shape_self_collide_single_contact(
            fcl.Sphere(1), fcl.Sphere(2),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2, 0, 0])),
            normal=[1, 0, 0], pos=[2. / 3, 0, 0], penetration_depth=1,
            depth_atol=1e-14)
        test_shape_self_collide_single_contact(
            fcl.Sphere(1), fcl.Sphere(1),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.6, 0, 0])),
            normal=[1, 0, 0], pos=[0.8, 0, 0], penetration_depth=0.4,
            depth_atol=1e-14)

    def test_self_distance(self):
        test_shape_self_distance(
            fcl.Sphere(1), fcl.Sphere(2),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([4, 0, 0])),
            1.)
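        # The centers above sit 4 apart along X and the radii sum to 1 + 2 = 3,
        # so the expected surface-to-surface distance is 4 - 3 = 1.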
        test_shape_self_distance(
            fcl.Sphere(1), fcl.Sphere(2),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2, 0, 0])),
            -1)


class TestEllipsoid(unittest.TestCase):
    def test_properties(self):
        random_radii = np.random.rand(3)
        ellipsoid = fcl.Ellipsoid(random_radii[0], random_radii[1], random_radii[2])
        np.testing.assert_allclose(ellipsoid.radii, random_radii, rtol=0,
                                   atol=0 + double_float_difference)

        random_radii = np.random.rand(3)
        ellipsoid.radii = random_radii
        np.testing.assert_allclose(ellipsoid.radii, random_radii, rtol=0,
                                   atol=0 + double_float_difference)

    def test_self_collide(self):
        e1, e2 = fcl.Ellipsoid(1, 2, 3), fcl.Ellipsoid(1, 2, 3)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.999, 0, 0])),
            True)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2.001, 0, 0])),
            False)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 3.999, 0])),
            True)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 4.001, 0])),
            False)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 5.999])),
            True)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 6.001])),
            False)
        rotate_around_x_90_degrees = np.array([0.70710678, 0.70710678, 0., 0.])
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 2.999, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 4.999, 0])),
            True)
        test_shape_self_collide(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 5.001, 0])),
            False)

    def test_self_distance(self):
        e1, e2 = fcl.Ellipsoid(1, 1, 1), fcl.Ellipsoid(1, 2, 3)
        test_shape_self_distance(e1, e2,
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([3, 0, 0])),
                                 1., atol=1e-6)
        test_shape_self_distance(e1, e2,
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 4, 0])),
                                 1., atol=1e-6)
        test_shape_self_distance(e1, e2,
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 5])),
                                 1., atol=1e-6)
        # Two ellipsoids collide
        test_shape_self_distance(
            e1, e2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1, 0, 0])),
            -1)


class TestCapsule(unittest.TestCase):
    def test_properties(self):
        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c = fcl.Capsule(random_radius, random_lz)
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c.radius = random_radius
        c.lz = random_lz
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

    def test_self_collide(self):
        c1, c2 = fcl.Capsule(0.5, 2), fcl.Capsule(1, 4)
        # Separation on X-Y plane
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.499, 0, 0])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([1.501, 0, 0])),
            False)
        # Separation on Z axis
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 4.499])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 4.501])),
            False)
        # Rotate c2 around X axis, then move along Y axis
        rotate_around_x_90_degrees = np.array([0.70710678, 0.70710678, 0., 0.])
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 2.499, 0])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 3.501, 0])),
            False)

    def test_self_distance(self):
        c1, c2 = fcl.Capsule(0.5, 2), fcl.Capsule(1, 4)
        # Separation on X, Y, Z axis
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2, 0, 0])),
            0.5)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 2, 0])),
            0.5)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 5])),
            0.5)
        # If collide, return -0.5?
        # It should return -1, @TODO: Why?
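        # One plausible explanation: capsule-capsule distance reduces to the
        # segment-segment distance minus both radii, which stays well defined
        # under overlap. Here the axis segments are 1 apart and the radii sum
        # to 1.5, so -0.5 is consistent with that formula.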
        # Maybe it is easier to compute distance between capsules
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 4])),
            -0.5)


class TestCone(unittest.TestCase):
    def test_properties(self):
        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c = fcl.Cone(random_radius, random_lz)
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c.radius = random_radius
        c.lz = random_lz
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

    def test_self_collide(self):
        c1, c2 = fcl.Cone(1, 2), fcl.Cone(2, 4)
        # Separation on X-Y plane
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2.999, 0, 1])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([3.001, 0, 2])),
            False)
        # Separation on Z axis
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 2.999])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 3.001])),
            False)
        # Flip one cone upside down
        c1, c2 = fcl.Cone(1, 2), fcl.Cone(1, 2)
        rotate_around_x_180_degrees = np.array([0, 1, 0, 0])
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 1])),
            fcl.Transform(rotate_around_x_180_degrees, np.array([0, 0.999, 1])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 1])),
            fcl.Transform(rotate_around_x_180_degrees, np.array([0, 1.001, 1])),
            False)

    def test_self_distance(self):
        c1, c2 = fcl.Cone(1, 2), fcl.Cone(2, 4)
        # Separation on X, Y, Z axis
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([4, 0, 1])),
            1.)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 4, 1])),
            1., 1e-6)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 4])),
            1.)
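    # Note on the cone distances above: the z-offset of 1 aligns the two base
    # planes, so each gap is 4 minus the summed extents (base radii 1 + 2 on
    # X/Y, half-heights 1 + 2 on Z), i.e. 1.0.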


class TestCylinder(unittest.TestCase):
    def test_properties(self):
        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c = fcl.Cylinder(random_radius, random_lz)
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

        random_radius, random_lz = nonzero_rand(), nonzero_rand()
        c.radius = random_radius
        c.lz = random_lz
        np.testing.assert_allclose(c.radius, random_radius, rtol=0,
                                   atol=0 + double_float_difference)
        np.testing.assert_allclose(c.lz, random_lz, rtol=0,
                                   atol=0 + double_float_difference)

    def test_self_collide(self):
        c1, c2 = fcl.Cylinder(0.5, 1), fcl.Cylinder(2, 4)
        # Separation on X-Y plane
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2.499, 0, 1])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([2.501, 0, 2])),
            False)
        # Separation on Z axis
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 2.499])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 2.501])),
            False)
        # Rotate c2 around X axis, then move along Y axis
        rotate_around_x_90_degrees = np.array([0.70710678, 0.70710678, 0., 0.])
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 2.499, 0])),
            True)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(rotate_around_x_90_degrees, np.array([0, 3.501, 0])),
            False)

    def test_self_distance(self):
        c1, c2 = fcl.Cylinder(0.5, 1), fcl.Cylinder(2, 4)
        # Separation on X, Y, Z axis
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([3, 0, 1])),
            0.5)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 3.5, 1])),
            1.0, 1e-5)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 3])),
            0.5)


class TestConvex(unittest.TestCase):
    # @TODO: make this test complete
    def setUp(self):
        vertices = np.array([[0, 0, 1], [0, 0, 0], [0, 1, 0], [1, 0, 0]])
        faces = [[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]
        nfaces = [len(face) for face in faces]
        cfaces = [[pair[0]] + pair[1] for pair in zip(nfaces, faces)]
        cfaces = [item for sublist in cfaces for item in sublist]
        c1 = fcl.Convex(vertices, len(faces), cfaces)
        self.tetrahedron1 = c1

        vertices = np.array([[0, 0, 1], [0, 0, 0], [-1, 0, 0], [0, 1, 0]])
        c2 = fcl.Convex(vertices, len(faces), cfaces)
        self.tetrahedron2 = c2

    def test_self_collide(self):
        c1, c2 = self.tetrahedron1, self.tetrahedron2
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([-0.0005, 0, 0])),
            False)
        test_shape_self_collide(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0.0005, 0, 0])),
            True)

    def test_self_distance(self):
        c1, c2 = self.tetrahedron1, self.tetrahedron2
        test_shape_self_distance(c1, c2,
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([-0.001, 0, 0])),
                                 0.001, atol=1e-7)
        test_shape_self_distance(
            c1, c2,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0.001, 0, 0])),
            -1)

    def test_properties(self):
        vertices = np.array([[0, 0, 1], [0, 0, 0], [0, 1, 0], [1, 0, 0]])
        faces = [[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]
        nfaces = [len(face) for face in faces]
        cfaces = [[pair[0]] + pair[1] for pair in zip(nfaces, faces)]
        cfaces = [item for sublist in cfaces for item in sublist]
        c = fcl.Convex(vertices, len(faces), cfaces)


class TestHalfspace(unittest.TestCase):
    def test_properties(self):
        random_normal, random_d = nonzero_rand(3), nonzero_rand()
        random_normal /= np.linalg.norm(random_normal)
        h = fcl.Halfspace(random_normal, random_d)
        np.testing.assert_allclose(h.n, random_normal, rtol=0,
                                   atol=1e-15 + double_float_difference)
        np.testing.assert_allclose(h.d, random_d, rtol=0,
                                   atol=1e-15 + double_float_difference)

        random_normal, random_d = nonzero_rand(3), nonzero_rand()
        random_normal /= np.linalg.norm(random_normal)
        h.n = random_normal
        h.d = random_d
        np.testing.assert_allclose(h.n, random_normal, rtol=0,
                                   atol=1e-15 + double_float_difference)
        np.testing.assert_allclose(h.d, random_d, rtol=0,
                                   atol=1e-15 + double_float_difference)

    def test_self_collide(self):
        # h1, h2 = fcl.Halfspace(np.array([0, 0, 1]),
        #                        -1), fcl.Halfspace(np.array([0, 0, -1]), 2)
        # @TODO: I am not quite sure what is the correct way to do Halfspace self-collide.
        pass

    def test_self_distance(self):
        pass
        # @TODO: can't do self distance on HalfSpace, will raise segmentation fault


class TestPlane(unittest.TestCase):
    # @TODO: Finish this
    def test_properties(self):
        pass

    def test_self_collide(self):
        pass

    def test_self_distance(self):
        pass


class TestBVHModel(unittest.TestCase):
    # @TODO: more & more thorough tests for BVH as it is so widely used.
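    # setUp below builds the two geometries used throughout: a closed box mesh
    # spanning [0, 1] x [0, 2] x [0, 3] (8 vertices, 12 triangles) and a
    # 3-point point-cloud model.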
    def setUp(self):
        # Create a box centered at [0.5, 1, 1.5] with extents [1, 2, 3]
        vertices = np.array([[0., 0., 0.],
                             [0., 0., 3.],
                             [0., 2., 0.],
                             [0., 2., 3.],
                             [1., 0., 0.],
                             [1., 0., 3.],
                             [1., 2., 0.],
                             [1., 2., 3.]])
        faces = np.array([[7, 3, 5], [5, 3, 1], [4, 0, 6], [6, 0, 2],
                          [1, 0, 5], [5, 0, 4], [3, 2, 1], [1, 2, 0],
                          [7, 6, 3], [3, 6, 2], [5, 4, 7], [7, 4, 6]])
        bvh = fcl.BVHModel()
        bvh.beginModel(len(faces), len(vertices))
        bvh.addSubModel(vertices, faces)
        bvh.endModel()
        self.box_mesh = bvh

        # Create a point cloud with 3 points
        points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
        bvh = fcl.BVHModel()
        bvh.beginModel(0, len(points))
        for pt in points:
            bvh.addVertex(*pt)
        # print("BVH result: ", bvh.endModel())
        # print("#BVs: ", bvh.getNumBVs())
        self.point_cloud = bvh

    def test_properties(self):
        self.assertEqual(self.point_cloud.getModelType(),
                         fcl.BVHModelType.BVH_MODEL_POINTCLOUD)

    def test_mesh_self_collide(self):
        # Separation on X axis
        test_shape_self_collide(
            self.box_mesh, self.box_mesh,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([-0.999, 0, 0])),
            True)
        test_shape_self_collide(
            self.box_mesh, self.box_mesh,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([-1.001, 0, 0])),
            False)
        # Rotate the first box by 180 degrees around the Z axis
        test_shape_self_collide(
            self.box_mesh, self.box_mesh,
            fcl.Transform(np.array([0, 0, 0, 1]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, -0.001, 0])),
            True)
        test_shape_self_collide(
            self.box_mesh, self.box_mesh,
            fcl.Transform(np.array([0, 0, 0, 1]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0.001, 0])),
            False)

    def test_mesh_self_distance(self):
        # @TODO: Why FCL returns 0 for two meshes in collision
        test_shape_self_distance(
            self.box_mesh, self.box_mesh,
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
            fcl.Transform(np.array([1, 0, 0, 0]), np.array([-0.999, 0, 0])),
            0)
        test_shape_self_distance(self.box_mesh, self.box_mesh,
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
                                 fcl.Transform(np.array([1, 0, 0, 0]), np.array([-1.001, 0, 0])),
                                 0.001, atol=1e-10)

    def test_point_cloud_collide(self):
        box = fcl.Box(1, 1, 1)
        point_cloud_co = fcl.CollisionObject(self.point_cloud, fcl.Transform())
        box_co = fcl.CollisionObject(box, fcl.Transform())
        req = fcl.CollisionRequest()
        res = fcl.CollisionResult()
        # ret = fcl.collide(point_cloud_co, box_co, req, res)
        # test_shape_self_collide(
        #     box, self.point_cloud,
        #     fcl.Transform(np.array([0, 0, 0, 1]), np.array([0, -0.499, 0])),
        #     fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
        #     True)
        # test_shape_self_collide(
        #     box, self.point_cloud,
        #     fcl.Transform(np.array([0, 0, 0, 1]), np.array([0, -0.501, 0])),
        #     fcl.Transform(np.array([1, 0, 0, 0]), np.array([0, 0, 0])),
        #     False)


if __name__ == '__main__':
    unittest.main()
41.118605
96
0.477773
4,691
35,362
3.467491
0.055638
0.063199
0.041498
0.03369
0.832657
0.810218
0.798352
0.763617
0.750768
0.727653
0
0.087279
0.374017
35,362
859
97
41.166473
0.647542
0.073836
0
0.725857
0
0
0.000245
0
0
0
0
0.001164
0.05919
1
0.065421
false
0.010903
0.007788
0.001558
0.0919
0.001558
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
989e62a71985fea6dd7288c0c809bd6de4176a33
108
py
Python
radiosity/odradio/__init__.py
hptcad/radiosity
c4a7dba3acd42c4954880d5c7314c2cda14fdd1b
[ "MIT" ]
null
null
null
radiosity/odradio/__init__.py
hptcad/radiosity
c4a7dba3acd42c4954880d5c7314c2cda14fdd1b
[ "MIT" ]
null
null
null
radiosity/odradio/__init__.py
hptcad/radiosity
c4a7dba3acd42c4954880d5c7314c2cda14fdd1b
[ "MIT" ]
null
null
null
from odradio.examples import cylinder
from odradio.examples import cone
from odradio.examples import concave
36
37
0.87037
15
108
6.266667
0.466667
0.351064
0.606383
0.797872
0
0
0
0
0
0
0
0
0.101852
108
3
38
36
0.969072
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
7f2abab5a37e55b5e7dcff52f03ed04b63f5ee88
100,258
py
Python
test_geneve.py
peter-zyj/awsCLI
e51e983e0cb79790ee0d30ff055dd27e3c958435
[ "MIT" ]
null
null
null
test_geneve.py
peter-zyj/awsCLI
e51e983e0cb79790ee0d30ff055dd27e3c958435
[ "MIT" ]
null
null
null
test_geneve.py
peter-zyj/awsCLI
e51e983e0cb79790ee0d30ff055dd27e3c958435
[ "MIT" ]
null
null
null
import os, sys
import re, hashlib, time
import atexit

from scapy.all import *
import pytest

from awsAPIv3 import aws
from lib_yijun import *

# pytest -v -s -m geneveASA --skip_updown --html=report.html --self-contained-html --metadata Version 9.17.0.20


def load_asa_config(asa_address, asa_jb_ip="20.0.250.10", debug=False):
    import pexpect
    # asa_address = "ssh -i 'testDog.pem' admin@3.142.241.180"
    conn = pexpect.spawn(asa_address)
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("en")
    conn, result, cont = Geneve_reply(conn)
    # conn.sendline("copy http://20.0.250.10/geneve.smp disk0:/.")
    conn.sendline(f"copy http://{asa_jb_ip}/geneve.smp disk0:/.")
    conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
    conn.sendline("conf term")
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("boot system disk0:/geneve.smp")
    conn, result, cont = Geneve_reply(conn)
    # if debug:
    #     print("~~~~~~Debug~~~~~~~")
    #     print('WAITED', wait(600))
    #     pytest.skip("Time to debug ASA error before reload")
    conn.sendline("reload")
    conn, result, cont = Geneve_reply(conn, debug=debug)
    print('WAITED', wait(600))
    conn.close()
    del conn

    conn = pexpect.spawn(asa_address)
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("en")
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("conf term")
    conn, result, cont = Geneve_reply(conn)
    # asa load pytest_day999.txt
    Geneve_load(conn, "pytest_day999.txt")
    conn.sendline("show run")
    conn, result, cont = Geneve_reply(conn)
    assert "20.0.1.101" in cont


def asa_config(asa_address, lines, debug=False) -> tuple:
    import pexpect
    conn = None
    while not conn:
        conn = pexpect.spawn(asa_address)
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("en")
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("conf term")
    conn, result, cont = Geneve_reply(conn)
    # for line in lines.splitlines():
    #     if line:
    #         conn.sendline(line)
    #         conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline(lines)
    conn, result, cont = Geneve_reply(conn, debug=debug)
    conn.close()
    del conn
    return result, cont


def ftd_hack(ftd_address, debug=False):
    import pexpect
    conn = None
    while not conn:
        conn = pexpect.spawn(ftd_address)
    conn, result, cont = Ocean_reply(conn, debug=debug)  # first login, finish all password prompts
    go2fxos(conn, debug=debug)
    conn.sendline("configure manager delete")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    time.sleep(5)
    conn.sendline("configure manager add 20.0.250.13 cisco")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    go2ftd(conn, debug=debug)
    conn.sendline("en")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("show version")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    p = "Serial Number: (.*)"
    sn = re.compile(p).findall(cont)[0].strip()
    if debug:
        print(sn)
    go2expert(conn, debug=debug)
    cli = f"sudo echo -n '1111222233334444{sn}' | md5sum>/mnt/disk0/enable_configure"
    conn.sendline(cli)
    conn, result, cont = Ocean_reply(conn, debug=debug)
    if debug:
        cli = "cat /mnt/disk0/enable_configure"
        conn.sendline(cli)
        conn, result, cont = Ocean_reply(conn, debug=debug)
        print(cont)
    go2ftd(conn, debug=debug)
    conn.sendline("en")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("")
    Ocean_reply(conn, debug=debug)
    conn.sendline("debug menu file-system 7")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("")
    Ocean_reply(conn, debug=debug)
    conn.sendline("conf term")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    if "firepower(config)#" not in cont:
        print("[Error][ftd_hack] failed to hack")
        return
    conn.sendline("end")
    Ocean_reply(conn, debug=debug)


def ftd_config(ftd_address, lines, debug=False) -> tuple:
    import pexpect
    conn = None
    while not conn:
        conn = pexpect.spawn(ftd_address)
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("system support diagnostic-cli")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("end")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("en")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("conf term")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    for line in lines.splitlines():
        if line:
            conn.sendline(line)
            conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("end")
    Ocean_reply(conn, debug=debug)
    conn.close()
    del conn
    return result, cont


def load_ftd_config(ftd_address, debug=False):
    import pexpect
    conn = pexpect.spawn(ftd_address)
    conn, result, cont = Ocean_reply(conn, debug=debug)
    go2ftd(conn, debug=debug)
    conn.sendline("en")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    conn.sendline("conf term")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    Ocean_load(conn, "pytest_day999FTD.txt", debug=debug)
    conn.sendline("show run")
    conn, result, cont = Ocean_reply(conn, debug=debug)
    assert "20.0.1.102" in cont


@pytest.fixture(scope="module", autouse=True)
def setup(request):
    skip_updown = request.config.option.skip_updown
    if skip_updown:
        print("\nsetup/teardown: skipped")
        return

    global setting, aws_obj
    setting = {}
    with open("/Users/yijunzhu/.aws/config_auto", "r") as f:
        cfg = f.read()
    with open("/Users/yijunzhu/.aws/credentials_auto", "r") as f:
        cda = f.read()
    setting["config"] = cfg
    setting["credentials"] = cda

    with open("/Users/yijunzhu/.aws/config", "r") as f:
        bytes_str = f.read().encode()
        md5_default_config = hashlib.md5(bytes_str).digest()
    with open("/Users/yijunzhu/.aws/credentials", "r") as f:
        bytes_str = f.read().encode()
        md5_default_credentials = hashlib.md5(bytes_str).digest()

    debug = request.config.option.trs
    aws_obj = aws(setting, debug=debug)
    atexit.register(aws_obj.close)
    aws_obj.load_deployment(fileName="aws_tb_pytest_west_1.config")
    aws_obj.start_deployment()
    Basic_miss_config()
    asa_ip = aws_obj.fetch_address("Test-1-169-EC2-ASA")
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    load_asa_config(asa_address, debug=debug)

    def teardown():
        aws_obj.close()
        with open("/Users/yijunzhu/.aws/config", "r") as f:
            bytes_str = f.read().encode()
            md5_default_config_v = hashlib.md5(bytes_str).digest()
        with open("/Users/yijunzhu/.aws/credentials", "r") as f:
            bytes_str = f.read().encode()
            md5_default_credentials_v = hashlib.md5(bytes_str).digest()
        assert md5_default_config == md5_default_config_v
        assert md5_default_credentials == md5_default_credentials_v

    request.addfinalizer(teardown)


def Basic_miss_config():
    print("####Basic_miss_config test####")
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run()
    cmd1 = "sudo ifconfig eth1 down"
    cmd2 = "sudo ifconfig eth1 10.0.1.10/24"
    cmd3 = "sudo ifconfig eth1 up"

    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh2 = paramiko.SSHClient()
    ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    _, stdout, _ = ssh.exec_command(f"{cmd1};{cmd2};{cmd3}")
    stdout.channel.recv_exit_status()
    _, stdout, _ = ssh2.exec_command(f"{cmd1};{cmd2};{cmd3}")
    stdout.channel.recv_exit_status()
    ssh.close()
    ssh2.close()

    #~~~~~~~~~~
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # jb_ip = aws_obj.fetch_address("Test-1-169-EC2-App-JB")
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert "100% packet loss" in resp1
    ssh.close()


# @pytest.fixture(scope="module", params=["mod1", "mod2"])
# def sss():
#     print("~~~~~sss~~~~~")
#
# @pytest.mark.shit
# def test_shit(sss):
#     print("\nshit")
#
# @pytest.mark.shit
# def test_shit2():
#     print("shit2")


@pytest.mark.clusterConfig
def test_cluster_config(local_asa):
    asa_dict = local_asa
    print(asa_dict)
    key = "testCat.pem"
    asa_jb_ip = "30.0.250.20"
    job_list = []
    from multiprocessing import Process
    timer_start = time.time()
    for name, ip in asa_dict.items():
        asa_address = f"ssh -i '{key}' admin@{ip}"
        name = name.replace("#", "-")
        timer_p = Process(target=load_asa_config_multi, args=(asa_address, name, asa_jb_ip))
        timer_p.start()
        job_list.append(timer_p)
    for job in job_list:
        job.join()
        job.close()
    end = time.time() - timer_start
    print("Info: time cost == ", end)
    # Load config
    # TBD


@pytest.mark.geneveASA
@pytest.mark.basic1to2
def test_Basic_PingGoogle(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert " 0% packet loss" in resp1
    ssh.close()


@pytest.mark.geneveASA
@pytest.mark.basic2to1
def test_Basic_PingApp(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    access_list = f"access-list geneve extended permit icmp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, access_list)
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command(f"ping {app_ip} -c 1")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert " 0% packet loss" in resp1
    no_access_list = f"no access-list geneve extended permit icmp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_access_list)
    ssh.close()


@pytest.mark.geneveASA
@pytest.mark.install1to2
def test_apt_install_from_outside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install net-tools'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    while True:
        _, stdout2, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                         "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ifconfig'")
        stdout2.channel.recv_exit_status()
        resp2 = "".join(stdout2.readlines())
        if not resp2:
            continue
        else:
            break
    assert "10.0.1.101" in resp2
    ssh.close()


@pytest.mark.geneveASA
@pytest.mark.install2to1
def test_apt_install_from_inside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    access_list = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, access_list)
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt update'")
        stdout.channel.recv_exit_status()
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install iperf -y'")
        stdout.channel.recv_exit_status()
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install apache2 -y'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    while True:
        _, stdout2, _ = ssh.exec_command(f"wget http://{app_ip}/index.html; ls index.html")
        stdout2.channel.recv_exit_status()
        resp2 = "".join(stdout2.readlines())
        if not resp2:
            continue
        else:
            break
    assert "No such file or directory" not in resp2
    no_access_list = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_access_list)
    ssh.close()


@pytest.mark.pyserver
def test_PYSERVER(skip_updown):
    print("skip_updown:", skip_updown)
    # asa_jb_address = "ssh -i 'testDog.pem' ubuntu@54.219.169.240"
    # asa_address = "ssh -i 'testDog.pem' ubuntu@54.241.122.28"
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@13.57.178.96:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@13.57.178.96 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@13.52.150.43:/home/ubuntu/.'"
    os.popen(cmd2).read()
    cmd3 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@13.57.48.179:/home/ubuntu/."
    os.popen(cmd3).read()
    # 2. run server file
    # cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
    #        "ubuntu@54.219.169.240 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
    #        "ubuntu@54.241.122.28 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
    # os.popen(cmd3).read()


@pytest.mark.geneveASA
@pytest.mark.tcp
@pytest.mark.tcp1to2
def test_TCP23_from_outside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd2).read()
    # 2. run server file
    cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
            f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
            "ubuntu@10.0.1.101 \'sudo pkill python3\''"
    os.popen(cmd_k).read()
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
    os.popen(cmd3).read()
    # 3. test
    test = f"""
import socket
s=socket.socket()
s.connect(("{app_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
    with open("test.py", "w+") as f:
        f.write(test)
    cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd4).read()
    cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
    resp = os.popen(cmd5).read()
    assert "[Pytest]TCP:23 is back!" in resp
    # # terminate server
    cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
    os.popen(cmd6).read()
    cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo pkill python3\''"
    os.popen(cmd7).read()
    no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.tcp
@pytest.mark.tcp2to1
def test_TCP23_from_inside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd2).read()
    # 2. run server file
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
    os.popen(cmd3).read()
    # 3. test
    test = f"""
import socket
s=socket.socket()
s.connect(("{app_jb_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
    with open("test.py", "w+") as f:
        f.write(test)
    cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
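    # Same two-hop pattern as test_TCP23_from_outside: test.py is staged on the
    # app jump box first (cmd4), then hopped to the inside host 10.0.1.101
    # (cmd4_2 below), which is only reachable through the jump box.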
    os.popen(cmd4).read()
    cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             "test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd4_2).read()
    cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo pkill python3;python3 test.py\''"
    resp = os.popen(cmd5).read()
    assert "[Pytest]TCP:23 is back!" in resp
    # # terminate server
    cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
    os.popen(cmd6).read()
    cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             "ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
    os.popen(cmd6_2).read()
    cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
    os.popen(cmd7).read()


@pytest.fixture()
def local_run(show=False):
    if "aws_obj" not in globals():
        aws_obj = aws(record=False)
    app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE", show=show)
    asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE", show=show)
    asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE", show=show)
    app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE", show=show)
    ftd = aws_obj.blind("Pytest-EC2-FTD", "EC2INSTANCE", show=show)
    fmc = aws_obj.blind("Pytest-EC2-FMC", "EC2INSTANCE", show=show)
    # ftd = aws_obj.blind("Pytest-EC2-FTD", "EC2INSTANCE", show=show)
    # fmc = aws_obj.blind("Pytest-EC2-FMC", "EC2INSTANCE", show=show)
    app_jb_ip = app_jb["public_ip"]
    asa_jb_ip = asa_jb["public_ip"]
    asa_ip = asa["public_ip"]
    app_ip = app["public_ip"]
    ftd_ip = ftd["public_ip"]
    fmc_ip = fmc["public_ip"]
    yield app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip
    aws_obj.close()


@pytest.fixture()
def acl_config(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    yield
    no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.udpYijun
# def test_UDP666(acl_config):
def test_UDP666(local_run, acl_config):
    # if "aws_obj" in globals():
    #     app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE")
    #     asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
    #     asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
    #     app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE")
    #
    # else:
    #     aws_obj = aws(record=False)
    #     app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE")
    #     asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
    #     asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
    #     app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE")
    #
    # app_jb_ip = app_jb["public_ip"]
    # asa_jb_ip = asa_jb["public_ip"]
    # asa_ip = asa["public_ip"]
    # app_ip = app["public_ip"]
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    # asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    # acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    # asa_config(asa_address, acl_config)
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd2).read()
    # 2. run server file
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
    os.popen(cmd3).read()
    # 3. test
    test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
    with open("test.py", "w+") as f:
        f.write(test)
    cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd4).read()
    cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo python3 test.py'"
    resp = os.popen(cmd5).read()
    assert "[Pytest]UDP:666 is back!" in resp
    # # terminate server
    cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
    os.popen(cmd6).read()
    cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo pkill python3\''"
    os.popen(cmd7).read()
    # no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    # asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.udp1to2
def test_UDP_from_inside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd2).read()
    # 2. run server file
    cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
            f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
            "ubuntu@10.0.1.101 \'sudo pkill python3\''"
    os.popen(cmd_k).read()
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
    os.popen(cmd3).read()
    # 3. test
    test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
    with open("test.py", "w+") as f:
        f.write(test)
    cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd4).read()
    cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
    resp = os.popen(cmd5).read()
    assert "[Pytest]UDP:666 is back!" in resp
    # # terminate server
    cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
    os.popen(cmd6).read()
    cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo pkill python3\''"
    os.popen(cmd7).read()
    no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.udp2to1
def test_UDP_from_outside(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd2).read()
    # 2. run server file
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
    os.popen(cmd3).read()
    # 3. test
    test = f"""
import socket,os
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_jb_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
    with open("test.py", "w+") as f:
        f.write(test)
    cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
    os.popen(cmd4).read()
    cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             "test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
    os.popen(cmd4_2).read()
    cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           "ubuntu@10.0.1.101 \'sudo python3 test.py; pkill python3\''"
    print(cmd5)
    resp = os.popen(cmd5).read()
    assert "[Pytest]UDP:666 is back!" in resp
    # # terminate server
    cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
    os.popen(cmd6).read()
    cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
             "ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
    os.popen(cmd6_2).read()
    cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
    os.popen(cmd7).read()


@pytest.mark.geneveASA
@pytest.mark.iperfudp
def test_iperf_udp(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s -u'"
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip} -u\''"
    res = os.popen(cmd2).read()
    bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0]
    assert float(bd) > 0
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
    os.popen(cmd3).read()
    no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.iperfudpreverse
def test_iperf_udp_reverse(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s -u\''"
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip} -u;'"
    res = os.popen(cmd2).read()
    print("Iperf result:\n", res)
    bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0]
    assert float(bd) > 0
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
    os.popen(cmd3).read()
    no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.iperftcp
def test_iperf_tcp(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s'"
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip}\''"
    res = os.popen(cmd2).read()
    try:
        bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0]
    except:
        bd = re.compile(" ([\d.]+?) (?=GBytes)").findall(res)[0]
    assert float(bd) > 0
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
    os.popen(cmd3).read()
    no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.iperftcpreverse
def test_iperf_tcp_reverse(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, acl_config)
    cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s\''"
    os.popen(cmd1).read()
    cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip}'"
    res = os.popen(cmd2).read()
    print("Iperf result:\n", res)
    try:
        bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0]
    except:
        bd = re.compile(" ([\d.]+?) (?=GBytes)").findall(res)[0]
    assert float(bd) > 0
    cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
    os.popen(cmd3).read()
    no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
    asa_config(asa_address, no_acl_config)


@pytest.mark.geneveASA
@pytest.mark.counter
def test_udp_counter(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd1 = "clear asp drop"
    cmd2 = "show asp drop frame geneve-invalid-udp-checksum"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    asa_config(asa_address, cmd1)
    send(IP(dst="20.0.1.101") / UDP(sport=20001, dport=6081, chksum=0) / b'\x08\x00\x08')
    _, res = asa_config(asa_address, cmd2)
    assert "geneve-invalid-udp-checksum" in res


@pytest.mark.geneveASA
@pytest.mark.reset
def test_tcp_counter(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
          f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
          f"ubuntu@10.0.1.101 \'sudo screen -d -m ssh root@{asa_jb_ip}\''"
    os.popen(cmd).read()
    cmd2 = "clear conn address 10.0.1.101"
    cmd3 = "show asp drop"
    cmd1 = "clear asp drop"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    asa_config(asa_address, cmd1)
    asa_config(asa_address, cmd2)
    cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
          f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
          f"ubuntu@10.0.1.101 \'sudo pkill screen\''"
    os.popen(cmd).read()
    _, res = asa_config(asa_address, cmd3)
    assert "tcp-not-syn" in res


@pytest.mark.geneveASA
@pytest.mark.logserver
def test_log_server(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh2 = paramiko.SSHClient()
    ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    _, stdout, _ = ssh2.exec_command("sudo ifconfig eth1 down;sudo ifconfig eth1 20.0.1.10/24;sudo ifconfig eth1 up")
    stdout.channel.recv_exit_status()
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 10'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert "0% packet loss" in resp1
    _, stdout, _ = ssh2.exec_command("sudo systemctl restart syslog")
    stdout.channel.recv_exit_status()
    while True:
        _, stdout, _ = ssh2.exec_command("tail -n 100 /var/log/syslog")
        stdout.channel.recv_exit_status()
        resp2 = "".join(stdout.readlines())
        if not resp2:
            continue
        else:
            break
    assert "8.8.8.8" in resp2
    ssh.close()
    ssh2.close()


@pytest.mark.geneveASA
@pytest.mark.genevedebug
def test_debug_geneve(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd1 = "debug geneve encapsulation"
    cmd2 = "debug geneve encapsulation 4"
    cmd3 = "debug geneve decapsulation"
    cmd4 = "debug geneve decapsulation 4"
    cmd5 = "debug geneve all"
    cmd_clean = "unde all"
    cmd_show = "show debug"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
@pytest.mark.geneveASA
@pytest.mark.genevedebug
def test_debug_geneve(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd1 = "debug geneve encapsulation"
    cmd2 = "debug geneve encapsulation 4"
    cmd3 = "debug geneve decapsulation"
    cmd4 = "debug geneve decapsulation 4"
    cmd5 = "debug geneve all"
    cmd_clean = "unde all"  # CLI prefix abbreviation of "undebug all"
    cmd_show = "show debug"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    import pexpect
    conn = pexpect.spawn(asa_address)
    Geneve_reply(conn)
    conn.sendline("en")
    Geneve_reply(conn)
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve" not in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd1)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve encapsulation enabled at level 1" in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd2)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve encapsulation enabled at level 4" in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd3)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve decapsulation enabled at level 1" in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd4)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve decapsulation enabled at level 4" in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd5)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve encapsulation enabled at level 1" in res
    assert "debug geneve decapsulation enabled at level 1" in res
    conn.sendline(cmd_clean)
    Geneve_reply(conn)
    conn.sendline(cmd_show)
    _, _, res = Geneve_reply(conn)
    assert "debug geneve" not in res
    conn.close()
    del conn


@pytest.mark.geneveASA
@pytest.mark.metaserver
def test_meta(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd1 = "no aaa authentication listener http data-interface port www"
    cmd2 = "nat (data-interface,data-interface) source static gwlb interface destination static interface metadata service http80 http80"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    asa_config(asa_address, cmd1)
    asa_config(asa_address, cmd2)
    time.sleep(20)
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert "0% packet loss" in resp1
    ssh.close()


@pytest.mark.geneveASA
@pytest.mark.statistics
def test_stats(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd1 = "show interface vni 1"
    cmd2 = "show nve 1"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    _, cont1_1 = asa_config(asa_address, cmd1)
    _, cont2_1 = asa_config(asa_address, cmd2)
    p1 = "(.*) packets input"
    p2 = "(.*) packets output"
    output_cmd1_1 = int(re.compile(p1).findall(cont1_1)[0])
    output_cmd2_1 = int(re.compile(p2).findall(cont2_1)[0])
    test_Basic_PingGoogle(local_run)
    _, cont1_2 = asa_config(asa_address, cmd1)
    _, cont2_2 = asa_config(asa_address, cmd2)
    output_cmd1_2 = int(re.compile(p1).findall(cont1_2)[0])
    output_cmd2_2 = int(re.compile(p2).findall(cont2_2)[0])
    assert output_cmd1_2 > output_cmd1_1
    assert output_cmd2_2 > output_cmd2_1


@pytest.mark.geneveASA
@pytest.mark.capture
def test_capture(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
    cmd0 = "no capture g"
    cmd1 = "clear cap /all"
    cmd2 = "cap g int ge trace"
    cmd3 = "show capture g | in icmp: echo request"
    asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
    asa_config(asa_address, cmd0)
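    # Capture workflow: clear any stale capture, start a new one on the
    # geneve interface with `trace`, generate traffic, then pull the first
    # echo-request packet number and confirm the traced verdict is "allow".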
asa_config(asa_address, cmd1) asa_config(asa_address, cmd2) test_Basic_PingGoogle(local_run) time.sleep(1) _, cont3 = asa_config(asa_address, cmd3) pNum = int(re.compile("\d+: ").findall(cont3)[0].strip().split(":")[0]) cmd4 = f"show capture g trace packet-number {pNum}" cmd5 = "no capture g" _, cont4 = asa_config(asa_address, cmd4) assert "Action: allow" in cont4 asa_config(asa_address, cmd5) @pytest.mark.replace @pytest.mark.reFTD def test_replace_FTD(): cont = ''' Del_Pytest_NWInterface_FTD1(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Pytest-EC2-FTD Del_Pytest_NWInterface_FTD2(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Pytest-EC2-FTD Del_Pytest_NWInterface_FTD3(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Pytest-EC2-FTD Del_Pytest_SUB_Sec_2_DATA(TERMINATION): type: SUBNET action: bind_to: - Del_Pytest_NWInterface_FTD2 Del_Pytest_SUB_Sec_3_DATA(TERMINATION): type: SUBNET action: bind_to: - Del_Pytest_NWInterface_FTD3 Del_Pytest-AMI-FTD(TERMINATION): # id: ami-0d846ab5ee3c4de5a type: AMICOPY action: bind_to: - Del_Pytest-EC2-FTD Del_Pytest-EC2-FTD(TERMINATION): # id: i-0dfac8028eeb2df7c type: EC2INSTANCE Pytest-EC2-FTD(EC2INSTANCE): image-id: Pytest-AMI-FTD instance-type: d2.2xlarge key-name: testDog security-group-ids: Test-1-169_SG_Sec_MGMT count: 1 subnet-id: Test-1-169_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.12 action: query_from: - Test-1-169_SUB_Sec_MGMT - Test-1-169_SG_Sec_MGMT bind_to: - Pytest-AMI-FTD - Del_Pytest-EC2-FTD cleanUP: True Pytest-AMI-FTD(AMICOPY): source-image-id: ami-025ac61040bca3a8e # source-image-id: ami-074379cc45251cfae source-region: us-west-2 region: us-west-1 name: ftdv action: bind_to: - Del_Pytest-AMI-FTD cleanUP: True Pytest_SUB_Sec_2_DATA(SUBNET): vpc-id: Test-1-169_VPC_Sec cidr-block: 20.0.2.0/24 availability-zone: '{Test-1-169_SUB_App_1_MGMT}' action: query_from: - Test-1-169_VPC_Sec - Test-1-169_SUB_App_1_MGMT bind_to: - Del_Pytest_SUB_Sec_2_DATA - Pytest_SUB_Sec_3_DATA cleanUP: True Pytest_SUB_Sec_3_DATA(SUBNET): vpc-id: Test-1-169_VPC_Sec cidr-block: 20.0.3.0/24 availability-zone: '{Test-1-169_SUB_App_1_MGMT}' action: query_from: - Test-1-169_VPC_Sec - Test-1-169_SUB_App_1_MGMT bind_to: - Del_Pytest_SUB_Sec_3_DATA cleanUP: True Pytest_NWInterface_FTD1(NETWORK_INTERFACE): subnet-id: Test-1-169_SUB_Sec_DATA description: pytest Data Network for ASA groups: Test-1-169_SG_Sec_DATA private-ip-address: 20.0.1.102 action: query_from: - Test-1-169_SUB_Sec_DATA - Test-1-169_SG_Sec_DATA bind_to: - Del_Pytest_NWInterface_FTD1 cleanUP: True Pytest_NWInterface_FTD2(NETWORK_INTERFACE): subnet-id: Pytest_SUB_Sec_2_DATA description: Test-1-169 Data Network2 for ASA groups: Test-1-169_SG_Sec_DATA private-ip-address: 20.0.2.102 action: query_from: - Test-1-169_SG_Sec_DATA bind_to: - Pytest_SUB_Sec_2_DATA - Del_Pytest_NWInterface_FTD2 cleanUP: True Pytest_NWInterface_FTD3(NETWORK_INTERFACE): subnet-id: Pytest_SUB_Sec_3_DATA description: Test-1-169 Data Network3 for ASA groups: Test-1-169_SG_Sec_DATA private-ip-address: 20.0.3.102 action: query_from: - Test-1-169_SG_Sec_DATA bind_to: - Pytest_SUB_Sec_3_DATA - Del_Pytest_NWInterface_FTD3 cleanUP: True Pytest_NWInterface_FTD_1_Bind(BIND): network-interface-id: Pytest_NWInterface_FTD1 instance-id: Pytest-EC2-FTD device-index: 1 action: bind_to: - Pytest_NWInterface_FTD1 - Pytest-EC2-FTD - Pytest_NWInterface_FTD_3_Bind cleanUP: True Pytest_NWInterface_FTD_2_Bind(BIND): network-interface-id: Pytest_NWInterface_FTD2 instance-id: 
Pytest-EC2-FTD device-index: 2 action: bind_to: - Pytest_NWInterface_FTD2 - Pytest-EC2-FTD - Pytest_NWInterface_FTD_1_Bind cleanUP: True Pytest_NWInterface_FTD_3_Bind(BIND): network-interface-id: Pytest_NWInterface_FTD3 instance-id: Pytest-EC2-FTD device-index: 3 action: bind_to: - Pytest_NWInterface_FTD3 - Pytest-EC2-FTD cleanUP: True ''' obj = aws(record=False, debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.reFTD2 def test_replace_FTD2(): cont = ''' Del_Test-Hybrid_NWInterface_FTD1(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Test-Hybrid-EC2-FTD Del_Test-Hybrid_NWInterface_FTD2(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Test-Hybrid-EC2-FTD Del_Test-Hybrid_NWInterface_FTD3(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_Test-Hybrid-EC2-FTD Del_Test-Hybrid_SUB_Sec_2_DATA(TERMINATION): type: SUBNET action: bind_to: - Del_Test-Hybrid_NWInterface_FTD2 Del_Test-Hybrid_SUB_Sec_3_DATA(TERMINATION): type: SUBNET action: bind_to: - Del_Test-Hybrid_NWInterface_FTD3 Del_Test-Hybrid-AMI-FTD(TERMINATION): # id: ami-0d846ab5ee3c4de5a type: AMICOPY action: bind_to: - Del_Test-Hybrid-EC2-FTD Del_Test-Hybrid-EC2-FTD(TERMINATION): # id: i-0dfac8028eeb2df7c type: EC2INSTANCE Test-Hybrid-EC2-FTD(EC2INSTANCE): image-id: Test-Hybrid-AMI-FTD instance-type: d2.2xlarge key-name: testDog security-group-ids: Test-Hybrid_SG_Sec_MGMT count: 1 subnet-id: Test-Hybrid_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.12 action: query_from: - Test-Hybrid_SUB_Sec_MGMT - Test-Hybrid_SG_Sec_MGMT bind_to: - Test-Hybrid-AMI-FTD - Del_Test-Hybrid-EC2-FTD cleanUP: True Test-Hybrid-AMI-FTD(AMICOPY): source-image-id: ami-08473057344d9dd0d # source-image-id: ami-074379cc45251cfae source-region: us-west-2 region: us-west-1 name: ftdv action: bind_to: - Del_Test-Hybrid-AMI-FTD cleanUP: True Test-Hybrid_SUB_Sec_2_DATA(SUBNET): vpc-id: Test-Hybrid_VPC_Sec cidr-block: 20.0.2.0/24 availability-zone: '{Test-Hybrid_SUB_App_1_MGMT}' action: query_from: - Test-Hybrid_VPC_Sec - Test-Hybrid_SUB_App_1_MGMT bind_to: - Del_Test-Hybrid_SUB_Sec_2_DATA - Test-Hybrid_SUB_Sec_3_DATA cleanUP: True Test-Hybrid_SUB_Sec_3_DATA(SUBNET): vpc-id: Test-Hybrid_VPC_Sec cidr-block: 20.0.3.0/24 availability-zone: '{Test-Hybrid_SUB_App_1_MGMT}' action: query_from: - Test-Hybrid_VPC_Sec - Test-Hybrid_SUB_App_1_MGMT bind_to: - Del_Test-Hybrid_SUB_Sec_3_DATA cleanUP: True Test-Hybrid_NWInterface_FTD1(NETWORK_INTERFACE): subnet-id: Test-Hybrid_SUB_Sec_DATA description: pytest Data Network for ASA groups: Test-Hybrid_SG_Sec_DATA private-ip-address: 20.0.1.102 action: query_from: - Test-Hybrid_SUB_Sec_DATA - Test-Hybrid_SG_Sec_DATA bind_to: - Del_Test-Hybrid_NWInterface_FTD1 cleanUP: True Test-Hybrid_NWInterface_FTD2(NETWORK_INTERFACE): subnet-id: Test-Hybrid_SUB_Sec_2_DATA description: Test-Hybrid Data Network2 for ASA groups: Test-Hybrid_SG_Sec_DATA private-ip-address: 20.0.2.102 action: query_from: - Test-Hybrid_SG_Sec_DATA bind_to: - Test-Hybrid_SUB_Sec_2_DATA - Del_Test-Hybrid_NWInterface_FTD2 cleanUP: True Test-Hybrid_NWInterface_FTD3(NETWORK_INTERFACE): subnet-id: Test-Hybrid_SUB_Sec_3_DATA description: Test-Hybrid Data Network3 for ASA groups: Test-Hybrid_SG_Sec_DATA private-ip-address: 20.0.3.102 action: query_from: - Test-Hybrid_SG_Sec_DATA bind_to: - Test-Hybrid_SUB_Sec_3_DATA - Del_Test-Hybrid_NWInterface_FTD3 cleanUP: True Test-Hybrid_NWInterface_FTD_1_Bind(BIND): network-interface-id: Test-Hybrid_NWInterface_FTD1 
instance-id: Test-Hybrid-EC2-FTD device-index: 1 action: bind_to: - Test-Hybrid_NWInterface_FTD1 - Test-Hybrid-EC2-FTD - Test-Hybrid_NWInterface_FTD_3_Bind cleanUP: True Test-Hybrid_NWInterface_FTD_2_Bind(BIND): network-interface-id: Test-Hybrid_NWInterface_FTD2 instance-id: Test-Hybrid-EC2-FTD device-index: 2 action: bind_to: - Test-Hybrid_NWInterface_FTD2 - Test-Hybrid-EC2-FTD - Test-Hybrid_NWInterface_FTD_1_Bind cleanUP: True Test-Hybrid_NWInterface_FTD_3_Bind(BIND): network-interface-id: Test-Hybrid_NWInterface_FTD3 instance-id: Test-Hybrid-EC2-FTD device-index: 3 action: bind_to: - Test-Hybrid_NWInterface_FTD3 - Test-Hybrid-EC2-FTD cleanUP: True ''' obj = aws(record=False, debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.reFMC2 def test_replace_FMC2(): cont = ''' Del_Test-Hybrid-EC2-FMC(TERMINATION): # id: i-0dfac8028eeb2df7c type: EC2INSTANCE Del_Test-Hybrid-AMI-FMC(TERMINATION): # id: ami-0d846ab5ee3c4de5a type: AMICOPY Test-Hybrid-EC2-FMC(EC2INSTANCE): image-id: Test-Hybrid-AMI-FMC instance-type: d2.2xlarge key-name: testDog security-group-ids: Test-Hybrid_SG_Sec_MGMT count: 1 subnet-id: Test-Hybrid_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.13 action: query_from: - Test-Hybrid_SUB_Sec_MGMT - Test-Hybrid_SG_Sec_MGMT bind_to: - Test-Hybrid-AMI-FMC - Del_Test-Hybrid-EC2-FMC cleanUP: True Test-Hybrid-AMI-FMC(AMICOPY): source-image-id: ami-0e8f534eeea33536b source-region: us-west-2 region: us-west-1 name: fmcv action: bind_to: - Del_Test-Hybrid-AMI-FMC cleanUP: True ''' obj = aws(record=False, debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.replace @pytest.mark.reFMC def test_replace_FMC(): cont = ''' Del_Pytest-EC2-FMC(TERMINATION): # id: i-0dfac8028eeb2df7c type: EC2INSTANCE Del_Pytest-AMI-FMC(TERMINATION): # id: ami-0d846ab5ee3c4de5a type: AMICOPY Pytest-EC2-FMC(EC2INSTANCE): image-id: Pytest-AMI-FMC instance-type: d2.2xlarge key-name: testDog security-group-ids: Test-1-169_SG_Sec_MGMT count: 1 subnet-id: Test-1-169_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.13 action: query_from: - Test-1-169_SUB_Sec_MGMT - Test-1-169_SG_Sec_MGMT bind_to: - Pytest-AMI-FMC - Del_Pytest-EC2-FMC cleanUP: True Pytest-AMI-FMC(AMICOPY): source-image-id: ami-0e8f534eeea33536b source-region: us-west-2 region: us-west-1 name: fmcv action: bind_to: - Del_Pytest-AMI-FMC cleanUP: True ''' obj = aws(record=False, debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.reASA def test_replace_ASA(): cont = ''' Del_pytest_ASA_New(TERMINATION): type: EC2INSTANCE Del_pytest_NWInterface_ASA_New(TERMINATION): type: NETWORK_INTERFACE action: bind_to: - Del_pytest_ASA_New pytest_ASA_New(EC2INSTANCE): image-id: ami-01cab33393210e391 instance-type: c5.xlarge key-name: testDog security-group-ids: Test-1-169_SG_Sec_MGMT count: 1 subnet-id: Test-1-169_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.12 user-data: file://pytest_day0.txt action: query_from: - Test-1-169_SUB_Sec_MGMT - Test-1-169_SG_Sec_MGMT bind_to: - Del_pytest_ASA_New cleanUP: True pytest_NWInterface_ASA_New(NETWORK_INTERFACE): subnet-id: Test-1-169_SUB_Sec_DATA description: Test-1-169 Data Network for ASA groups: Test-1-169_SG_Sec_DATA private-ip-address: 20.0.1.102 action: query_from: - Test-1-169_SG_Sec_DATA - Test-1-169_SUB_Sec_DATA bind_to: - Del_pytest_NWInterface_ASA_New cleanUP: True 
pytest_NWInterface_ASA_Bind(BIND): network-interface-id: pytest_NWInterface_ASA_New instance-id: pytest_ASA_New device-index: 1 action: bind_to: - pytest_NWInterface_ASA_New - pytest_ASA_New cleanUP: True ''' obj = aws(record=False) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.addasa def test_addASA(): cont = ''' pytest_ASA_New(EC2INSTANCE): image-id: ami-01cab33393210e391 instance-type: c5.xlarge key-name: testDog security-group-ids: Test-1-169_SG_Sec_MGMT count: 1 subnet-id: Test-1-169_SUB_Sec_MGMT associate-public-ip-address: None private-ip-address: 20.0.250.12 user-data: file://pytest_day0.txt action: query_from: - Test-1-169_SUB_Sec_MGMT - Test-1-169_SG_Sec_MGMT cleanUP: True pytest_NWInterface_ASA_New(NETWORK_INTERFACE): subnet-id: Test-1-169_SUB_Sec_DATA description: Test-1-169 Data Network for ASA groups: Test-1-169_SG_Sec_DATA private-ip-address: 20.0.1.102 action: query_from: - Test-1-169_SG_Sec_DATA - Test-1-169_SUB_Sec_DATA cleanUP: True pytest_NWInterface_ASA_Bind(BIND): network-interface-id: pytest_NWInterface_ASA_New instance-id: pytest_ASA_New device-index: 1 action: bind_to: - pytest_NWInterface_ASA_New - pytest_ASA_New cleanUP: True ''' setting = {} cfg = {"default": {"region": "us-west-1", "output": "yaml"}} cda = {"default": {"aws_access_key_id": "AKIAWMUP3NI4ET7YU6AN", "aws_secret_access_key": "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"}} setting["config"] = cfg setting["credentials"] = cda obj = aws(setting, record=False) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() # asa_ip = obj.fetch_address("Auto_ASA_New") # asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}" # # load_asa_config(asa_address, debug=False) # @pytest.mark.addftd # def test_FTD(): # cont = ''' # Pytest-EC2-FTD(EC2INSTANCE): # image-id: Pytest-AMI-FTD # instance-type: d2.2xlarge # key-name: testDog # security-group-ids: Test-1-169_SG_Sec_MGMT # count: 1 # subnet-id: Test-1-169_SUB_Sec_MGMT # associate-public-ip-address: None # private-ip-address: 20.0.250.12 # action: # query_from: # - Test-1-169_SUB_Sec_MGMT # - Test-1-169_SG_Sec_MGMT # bind_to: # - Pytest-AMI-FTD # cleanUP: True # # Pytest-AMI-FTD(AMICOPY): # source-image-id: ami-05a840fdc851de7cb # source-region: us-east-2 # region: us-west-1 # name: ftdv # action: # cleanUP: True # # Pytest_SUB_Sec_2_DATA(SUBNET): # vpc-id: Test-1-169_VPC_Sec # cidr-block: 20.0.2.0/24 # availability-zone: '{Test-1-169_SUB_App_1_MGMT}' # action: # query_from: # - Test-1-169_VPC_Sec # - Test-1-169_SUB_App_1_MGMT # cleanUP: True # Pytest_SUB_Sec_3_DATA(SUBNET): # vpc-id: Test-1-169_VPC_Sec # cidr-block: 20.0.3.0/24 # availability-zone: '{Test-1-169_SUB_App_1_MGMT}' # action: # query_from: # - Test-1-169_VPC_Sec # - Test-1-169_SUB_App_1_MGMT # cleanUP: True # # Pytest_NWInterface_FTD1(NETWORK_INTERFACE): # subnet-id: Test-1-169_SUB_Sec_DATA # description: pytest Data Network for ASA # groups: Test-1-169_SG_Sec_DATA # private-ip-address: 20.0.1.102 # action: # query_from: # - Test-1-169_SUB_Sec_DATA # - Test-1-169_SG_Sec_DATA # cleanUP: True # Pytest_NWInterface_FTD2(NETWORK_INTERFACE): # subnet-id: Pytest_SUB_Sec_2_DATA # description: Test-1-169 Data Network2 for ASA # groups: Test-1-169_SG_Sec_DATA # private-ip-address: 20.0.2.102 # action: # query_from: # - Test-1-169_SG_Sec_DATA # bind_to: # - Pytest_SUB_Sec_2_DATA # cleanUP: True # Pytest_NWInterface_FTD3(NETWORK_INTERFACE): # subnet-id: Pytest_SUB_Sec_3_DATA # description: Test-1-169 Data Network3 for ASA # groups: 
Test-1-169_SG_Sec_DATA # private-ip-address: 20.0.3.102 # action: # query_from: # - Test-1-169_SG_Sec_DATA # bind_to: # - Pytest_SUB_Sec_3_DATA # cleanUP: True # # Pytest_NWInterface_FTD_1_Bind(BIND): # network-interface-id: Pytest_NWInterface_FTD1 # instance-id: Pytest-EC2-FTD # device-index: 1 # action: # bind_to: # - Pytest_NWInterface_FTD1 # - Pytest-EC2-FTD # cleanUP: True # Pytest_NWInterface_FTD_2_Bind(BIND): # network-interface-id: Pytest_NWInterface_FTD2 # instance-id: Pytest-EC2-FTD # device-index: 2 # action: # bind_to: # - Pytest_NWInterface_FTD2 # - Pytest-EC2-FTD # cleanUP: True # Pytest_NWInterface_FTD_3_Bind(BIND): # network-interface-id: Pytest_NWInterface_FTD3 # instance-id: Pytest-EC2-FTD # device-index: 3 # action: # bind_to: # - Pytest_NWInterface_FTD3 # - Pytest-EC2-FTD # cleanUP: True # ''' # obj = aws(debug=False) # atexit.register(obj.close) # # obj.load_deployment(content=cont) # obj.start_deployment() # # # @pytest.mark.addfmc # def test_FMC(): # cont = ''' # Pytest-EC2-FMC(EC2INSTANCE): # image-id: Pytest-AMI-FMC # instance-type: d2.2xlarge # key-name: testDog # security-group-ids: Test-1-169_SG_Sec_MGMT # count: 1 # subnet-id: Test-1-169_SUB_Sec_MGMT # associate-public-ip-address: None # private-ip-address: 20.0.250.13 # action: # query_from: # - Test-1-169_SUB_Sec_MGMT # - Test-1-169_SG_Sec_MGMT # bind_to: # - Pytest-AMI-FMC # cleanUP: True # # Pytest-AMI-FMC(AMICOPY): # source-image-id: ami-06aac12eabffe610d # source-region: us-east-2 # region: us-west-1 # name: fmcv # action: # cleanUP: True # ''' # obj = aws(debug=True) # atexit.register(obj.close) # # obj.load_deployment(content=cont) # obj.start_deployment() @pytest.mark.regASA def test_reg_asa(): cont=""" Del_Test-1-169_TG_ASA(TERMINATION): target-group-arn: Test-1-169-TG targets: Id=Test-1-169_NWInterface_ASA type: REGISTER action: query_from: - Test-1-169-TG - Test-1-169_NWInterface_ASA Del_Test-1-169_TG_FTD(TERMINATION): target-group-arn: Test-1-169-TG targets: Id=Pytest_NWInterface_FTD1 type: REGISTER action: query_from: - Test-1-169-TG - Pytest_NWInterface_FTD1 Test-1-169_TG_Instance(REGISTER): target-group-arn: Test-1-169-TG targets: Id=Test-1-169_NWInterface_ASA action: query_from: - Test-1-169-TG - Test-1-169_NWInterface_ASA bind_to: - Del_Test-1-169_TG_FTD - Del_Test-1-169_TG_ASA cleanUP: True """ obj = aws(debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.regFTD def test_reg_ftd(): cont = """ Del_Test-1-169_TG_ASA(TERMINATION): target-group-arn: Test-1-169-TG targets: Id=Test-1-169_NWInterface_ASA type: REGISTER action: query_from: - Test-1-169-TG - Test-1-169_NWInterface_ASA Del_Test-1-169_TG_FTD(TERMINATION): target-group-arn: Test-1-169-TG targets: Id=Pytest_NWInterface_FTD1 type: REGISTER action: query_from: - Test-1-169-TG - Pytest_NWInterface_FTD1 Test-1-169_TG_Instance(REGISTER): target-group-arn: Test-1-169-TG targets: Id=Pytest_NWInterface_FTD1 action: query_from: - Test-1-169-TG - Pytest_NWInterface_FTD1 bind_to: - Del_Test-1-169_TG_FTD - Del_Test-1-169_TG_ASA cleanUP: True """ obj = aws(debug=True) atexit.register(obj.close) obj.load_deployment(content=cont) obj.start_deployment() @pytest.mark.hackFTD def test_ftd_backdoor(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" ftd_hack(ftd_address) cmd = "conf term" res, cont = ftd_config(ftd_address, cmd) assert "firepower(config)#" in cont @pytest.mark.FMCreg def test_fmc_reg(local_run): # def 
test_fmc_reg():
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    # Need to manually SSH into the FMCv first, which helps to set up
    # admin/Cisco123!@# (the default is Cisco@13).
    timer = 5
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    # fmc_ip = "52.53.155.170"
    driver = webdriver.Chrome("/Users/yijunzhu/PycharmProjects/iTest/Geneve/chromedriver")
    # Click through the self-signed-certificate interstitial if it appears.
    try:
        driver.get(f"https://{fmc_ip}/ui/login")
        driver.find_element(By.ID, "details-button").click()
        driver.find_element(By.ID, "proceed-link").click()
    except:
        pass
    time.sleep(timer)  # wait, otherwise the bd-2 field can't be found
    driver.get(f"https://{fmc_ip}/ui/login")
    driver.find_element(By.ID, "bd-2").send_keys("admin")
    driver.find_element(By.ID, "bd-5").send_keys("Cisco123!@#")
    driver.find_element(By.CSS_SELECTOR, ".atomic-btn").click()
    time.sleep(timer)
    try:
        driver.find_element(By.CSS_SELECTOR, ".atomic-btn:nth-child(2)").click()
    except:
        pass
    time.sleep(timer)
    driver.find_element(By.LINK_TEXT, "Devices").click()
    time.sleep(timer)
    driver.find_element(By.LINK_TEXT, "Device Management").click()
    time.sleep(timer)
    driver.find_element(By.CSS_SELECTOR, "#gwt-debug-device_management-add_dropdown-add .x-btn-text").click()
    driver.find_element(By.ID, "gwt-debug-device_management-device-add").click()
    time.sleep(timer)
    driver.find_element(By.ID, "gwt-debug-device_registration-host-text_field-input").send_keys("20.0.250.12")
    driver.find_element(By.ID, "gwt-debug-device_registration-display_name-text_field-input").click()
    driver.find_element(By.ID, "gwt-debug-device_registration-registration_key-text_field-input").send_keys("cisco")
    driver.find_element(By.ID, "gwt-debug-device_registration-access_control_policy-combobox-input").click()
    time.sleep(timer)
    driver.find_element(By.XPATH, '//div[text()="default_yijun"]').click()
    driver.find_element(By.ID, "gwt-debug-device_registration-license_tiers-combobox-input").click()
    time.sleep(timer)
    driver.find_element(By.XPATH, '//div[text()="FTDv20 - Tiered (Core 4 / 8 GB)"]').click()
    time.sleep(timer)
    check1 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="Malware"]')
    check2 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="Threat"]')
    check3 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="URL Filtering"]')
    check1_id = str(check1.get_attribute("htmlfor"))
    check2_id = str(check2.get_attribute("htmlfor"))
    check3_id = str(check3.get_attribute("htmlfor"))
    driver.find_element(By.ID, check1_id).click()
    driver.find_element(By.ID, check2_id).click()
    driver.find_element(By.ID, check3_id).click()
    time.sleep(timer)
    driver.find_element(By.CSS_SELECTOR, "#gwt-debug-device_registration-register-button .x-btn-text").click()
    time.sleep(5)


@pytest.mark.FTDconfig
def test_ftd_config(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
    load_ftd_config(ftd_address, debug=False)


@pytest.mark.geneveFTD
@pytest.mark.FTDmetaserver
@pytest.mark.FTDbasic1to2
def test_Basic_PingGoogle_FTD(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    # test_reg_ftd()
    # print('WAIT for FTD register', wait(90))
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert "0% packet loss" in resp1
    ssh.close()


@pytest.mark.geneveFTD
@pytest.mark.FTDbasic2to1
def test_Basic_PingApp_FTD(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command(f"ping {app_ip} -c 1")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    assert "0% packet loss" in resp1
    ssh.close()


@pytest.mark.geneveFTD
@pytest.mark.FTDinstall1to2
def test_apt_install_from_outside_FTD(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install net-tools'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    while True:
        _, stdout2, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                         "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ifconfig'")
        stdout2.channel.recv_exit_status()
        resp2 = "".join(stdout2.readlines())
        if not resp2:
            continue
        else:
            break
    assert "10.0.1.101" in resp2
    ssh.close()


@pytest.mark.geneveFTD
@pytest.mark.FTDinstall2to1
def test_apt_install_from_inside_FTD(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
    while True:
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt update'")
        stdout.channel.recv_exit_status()
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install iperf -y'")
        stdout.channel.recv_exit_status()
        _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
                                        "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install apache2 -y'")
        stdout.channel.recv_exit_status()
        resp1 = "".join(stdout.readlines())
        if not resp1:
            continue
        else:
            break
    while True:
        _, stdout2, _ = ssh.exec_command(f"wget http://{app_ip}/index.html; ls index.html")
        stdout2.channel.recv_exit_status()
        resp2 = "".join(stdout2.readlines())
        if not resp2:
            continue
        else:
            break
    assert "No such file or directory" not in resp2
    ssh.close()


@pytest.mark.geneveFTD
@pytest.mark.FTDtcp1to2
def test_TCP23_from_outside_FTD(local_run):
    app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
    # 1. transfer server file
    cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
           f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
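    # Pytest_server.py is pushed two hops deep (jump box, then 10.0.1.101) so
    # the inner host can answer on TCP/23; the client script below is written
    # on the fly and asserts on the server's "[Pytest]TCP:23 is back!" banner.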
os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd2).read() # 2. run server file cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3\''" os.popen(cmd_k).read() cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''" os.popen(cmd3).read() # 3. test test = f""" import socket s=socket.socket() s.connect(("{app_ip}",23)) s.send("Yijun is coming".encode()) msg = s.recv(1024) print(msg) """ with open("test.py", "w+") as f: f.write(test) cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd4).read() cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'" resp = os.popen(cmd5).read() assert "[Pytest]TCP:23 is back!" in resp # # terminate server cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'" os.popen(cmd6).read() cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3\''" os.popen(cmd7).read() @pytest.mark.geneveFTD @pytest.mark.FTDtcp2to1 def test_TCP23_from_inside_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run # 1. transfer server file cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd2).read() # 2. run server file cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'" os.popen(cmd3).read() # 3. test test = f""" import socket s=socket.socket() s.connect(("{app_jb_ip}",23)) s.send("Yijun is coming".encode()) msg = s.recv(1024) print(msg) """ with open("test.py", "w+") as f: f.write(test) cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/." 
os.popen(cmd4).read() cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "test.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd4_2).read() cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3;python3 test.py\''" resp = os.popen(cmd5).read() assert "[Pytest]TCP:23 is back!" in resp # # terminate server cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'" os.popen(cmd6).read() cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo rm -rf test.py\''" os.popen(cmd6_2).read() cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3'" os.popen(cmd7).read() @pytest.mark.geneveFTD @pytest.mark.FTDudpYijun def test_UDP666_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run # 1. transfer server file cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd2).read() # 2. run server file cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''" os.popen(cmd3).read() # 3. test test = f""" import socket s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) s.sendto("Yijun is coming".encode(), ("{app_ip}", 666)) msg = s.recvfrom(1024) print(msg[0]) """ with open("test.py", "w+") as f: f.write(test) cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd4).read() cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo python3 test.py'" resp = os.popen(cmd5).read() assert "[Pytest]UDP:666 is back!" in resp # # terminate server cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'" os.popen(cmd6).read() cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3\''" os.popen(cmd7).read() @pytest.mark.geneveFTD @pytest.mark.FTDudp1to2 def test_UDP_from_inside_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run # 1. 
transfer server file cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd2).read() # 2. run server file cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3\''" os.popen(cmd_k).read() cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''" os.popen(cmd3).read() # 3. test test = f""" import socket s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) s.sendto("Yijun is coming".encode(), ("{app_ip}", 666)) msg = s.recvfrom(1024) print(msg[0]) """ with open("test.py", "w+") as f: f.write(test) cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd4).read() cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'" resp = os.popen(cmd5).read() assert "[Pytest]UDP:666 is back!" in resp # # terminate server cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'" os.popen(cmd6).read() cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo pkill python3\''" os.popen(cmd7).read() @pytest.mark.geneveFTD @pytest.mark.FTDudp2to1 def test_UDP_from_outside_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run # 1. transfer server file cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/." os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd2).read() # 2. run server file cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'" os.popen(cmd3).read() # 3. test test = f""" import socket s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) s.sendto("Yijun is coming".encode(), ("{app_jb_ip}", 666)) msg = s.recvfrom(1024) print(msg[0]) """ with open("test.py", "w+") as f: f.write(test) cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/." 
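    # The generated client is copied one hop further below so the datagram
    # originates on 10.0.1.101; the "[Pytest]UDP:666 is back!" reply proves
    # the round trip crossed the Geneve path in both directions.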
os.popen(cmd4).read() cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "test.py ubuntu@10.0.1.101:/home/ubuntu/.'" os.popen(cmd4_2).read() cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo python3 test.py; pkill python3\''" resp = os.popen(cmd5).read() assert "[Pytest]UDP:666 is back!" in resp # # terminate server cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'" os.popen(cmd6).read() cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ "ubuntu@10.0.1.101 \'sudo rm -rf test.py\''" os.popen(cmd6_2).read() cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill python3'" os.popen(cmd7).read() @pytest.mark.geneveFTD @pytest.mark.FTDiperfudp def test_iperf_udp_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s -u'" os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip} -u\''" res = os.popen(cmd2).read() bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0] assert float(bd) > 0 cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill iperf'" os.popen(cmd3).read() @pytest.mark.geneveFTD @pytest.mark.FTDiperfudpreverse def test_iperf_udp_reverse_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s -u\''" os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip} -u;'" res = os.popen(cmd2).read() print("Iperf result:\n", res) bd = re.compile(" ([\d.]+?) 
(?=MBytes)").findall(res)[0] assert float(bd) > 0 cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo pkill iperf\''" os.popen(cmd3).read() @pytest.mark.geneveFTD @pytest.mark.FTDiperftcp def test_iperf_tcp_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s'" os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip}\''" res = os.popen(cmd2).read() print(res) try: bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0] except: bd = re.compile(" ([\d.]+?) (?=GBytes)").findall(res)[0] assert float(bd) > 0 cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo pkill iperf'" os.popen(cmd3).read() @pytest.mark.geneveFTD @pytest.mark.FTDiperftcpreverse def test_iperf_tcp_reverse_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s\''" os.popen(cmd1).read() cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip}'" res = os.popen(cmd2).read() print("Iperf result:\n", res) try: bd = re.compile(" ([\d.]+?) (?=MBytes)").findall(res)[0] except: bd = re.compile(" ([\d.]+?) 
(?=GBytes)").findall(res)[0] assert float(bd) > 0 cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo pkill iperf\''" os.popen(cmd3).read() @pytest.mark.geneveFTD @pytest.mark.FTDcounter def test_udp_counter_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "clear asp drop" cmd2 = "show asp drop frame geneve-invalid-udp-checksum" ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" ftd_config(ftd_address, cmd1) send(IP(dst="20.0.1.102") / UDP(sport=20001, dport=6081, chksum=0) / b'\x08\x00\x08') _, res = ftd_config(ftd_address, cmd2) assert "geneve-invalid-udp-checksum" in res @pytest.mark.geneveFTD @pytest.mark.FTDreset def test_tcp_counter_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo screen -d -m ssh root@{asa_jb_ip}\''" os.popen(cmd).read() cmd2 = "clear conn address 10.0.1.101" cmd3 = "show asp drop" cmd1 = "clear asp drop" ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" ftd_config(ftd_address, cmd1) ftd_config(ftd_address, cmd2) cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \ f"ubuntu@10.0.1.101 \'sudo pkill screen\''" os.popen(cmd).read() _, res = ftd_config(ftd_address, cmd3) assert "tcp-not-syn" in res @pytest.mark.geneveFTD @pytest.mark.FTDlogserver def test_log_server_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run config = ''' logging enable logging buffer-size 52428800 logging buffered debugging logging trap debugging logging host data-interface 20.0.1.10 logging message 302020 ''' ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" ftd_config(ftd_address, config) import paramiko ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh2 = paramiko.SSHClient() ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem") ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem") _, stdout, _ = ssh2.exec_command("sudo ifconfig eth1 down;sudo ifconfig eth1 20.0.1.10/24;sudo ifconfig eth1 up") stdout.channel.recv_exit_status() while True: _, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no " "-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 10'") stdout.channel.recv_exit_status() resp1 = "".join(stdout.readlines()) if not resp1: continue else: break assert "0% packet loss" in resp1 _, stdout, _ = ssh2.exec_command("sudo systemctl restart syslog") stdout.channel.recv_exit_status() while True: _, stdout, _ = ssh2.exec_command("tail -n 100 /var/log/syslog") stdout.channel.recv_exit_status() resp2 = "".join(stdout.readlines()) if not resp2: continue else: break assert "8.8.8.8" in resp2 ssh.close() ssh2.close() @pytest.mark.geneveFTD @pytest.mark.FTDgenevedebug def test_debug_geneve_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "debug geneve encapsulation" cmd2 = "debug geneve encapsulation 4" cmd3 = 
"debug geneve decapsulation" cmd4 = "debug geneve decapsulation 4" cmd5 = "debug geneve all" cmd_clean = "unde all" cmd_show = "show debug" ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" import pexpect conn = pexpect.spawn(ftd_address) Ocean_reply(conn) go2ftd(conn) conn.sendline("en") Ocean_reply(conn) conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve" not in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd1) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve encapsulation enabled at level 1" in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd2) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve encapsulation enabled at level 4" in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd3) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve decapsulation enabled at level 1" in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd4) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve decapsulation enabled at level 4" in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd5) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve encapsulation enabled at level 1" in res assert "debug geneve decapsulation enabled at level 1" in res conn.sendline(cmd_clean) Ocean_reply(conn) conn.sendline(cmd_show) _, _, res = Ocean_reply(conn) assert "debug geneve" not in res conn.close() del conn @pytest.mark.geneveFTD @pytest.mark.FTDstatistics def test_stats_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd1 = "show interface vni 1" cmd2 = "show nve 1" ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" _, cont1_1 = ftd_config(ftd_address, cmd1) _, cont2_1 = ftd_config(ftd_address, cmd2) p1 = "(.*) packets input" p2 = "(.*) packets output" output_cmd1_1 = int(re.compile(p1).findall(cont1_1)[0]) output_cmd2_1 = int(re.compile(p2).findall(cont2_1)[0]) test_Basic_PingGoogle_FTD(local_run) _, cont1_2 = ftd_config(ftd_address, cmd1) _, cont2_2 = ftd_config(ftd_address, cmd2) output_cmd1_2 = int(re.compile(p1).findall(cont1_2)[0]) output_cmd2_2 = int(re.compile(p2).findall(cont2_2)[0]) assert output_cmd1_2 > output_cmd1_1 assert output_cmd2_2 > output_cmd2_1 @pytest.mark.geneveFTD @pytest.mark.FTDcapture def test_capture_FTD(local_run): app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run cmd0 = "no capture g" cmd1 = "clear cap /all" cmd2 = "cap g int ge trace" cmd3 = "show capture g | in icmp: echo request" ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}" ftd_config(ftd_address, cmd0) ftd_config(ftd_address, cmd1) ftd_config(ftd_address, cmd2) test_Basic_PingGoogle_FTD(local_run) time.sleep(1) _, cont3 = ftd_config(ftd_address, cmd3) pNum = int(re.compile("\d+: ").findall(cont3)[0].strip().split(":")[0]) cmd4 = f"show capture g trace packet-number {pNum} | in Action:" cmd5 = "no capture g" _, cont4 = ftd_config(ftd_address, cmd4) assert "Action: allow" in cont4 ftd_config(ftd_address, cmd5) @pytest.mark.updowngrade def test_image_replacement(keyFile, trs): print("keyFile::", keyFile) print("Debug::", trs) obj = aws(record=False) res1 = obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE") res2 = res = obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE") # backup config in ASA cmd = "show run" asa_address = f"ssh -i 'testDog.pem' 
@pytest.mark.updowngrade
def test_image_replacement(keyFile, trs):
    print("keyFile::", keyFile)
    print("Debug::", trs)
    obj = aws(record=False)
    res1 = obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
    res2 = obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
    # backup config in ASA
    cmd = "show run"
    asa_address = f"ssh -i 'testDog.pem' admin@{res1['public_ip']}"
    _, old_config = asa_config(asa_address, cmd)
    assert old_config != ""
    # transfer image to asa
    new_image = "geneve_new.smp"
    command = f"scp -i {keyFile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
              f"{new_image} ubuntu@{res2['public_ip']}:/var/www/html/."
    timer("start")
    os.popen(command).read()
    timer("stop")
    import pexpect
    debug = trs
    conn = pexpect.spawn(asa_address)
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("en")
    conn, result, cont = Geneve_reply(conn)
    print("debug:start copy")
    conn.sendline("copy http://20.0.250.10/geneve_new.smp disk0:/geneve_new.smp")
    conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
    print("debug:end copy")
    # print old version
    conn.sendline("show version")
    conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
    print("Old Version::", cont)
    # reload asa
    conn.sendline("boot system disk0:/geneve_new.smp")
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("reload")
    conn, result, cont = Geneve_reply(conn, debug=debug)
    print('WAITED', wait(600))
    conn.close()
    del conn
    # print new version
    conn = pexpect.spawn(asa_address)
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("en")
    conn, result, cont = Geneve_reply(conn)
    conn.sendline("show version")
    conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
    print("New Version::", cont)
    # config is the same before and after the upgrade
    cmd = "show run"
    asa_address = f"ssh -i 'testDog.pem' admin@{res1['public_ip']}"
    _, new_config = asa_config(asa_address, cmd)
    temp = new_config.replace("geneve_new.smp", "geneve.smp")
    assert temp == old_config


if __name__ == '__main__':
    pytest.main(["-q", "-s", "-ra", "test_geneve.py"])

# capture abc interface data-interface
# show capture abc packet-number 18 detail decode
#
# copy /pcap capture:abc abc.pcap
#
# copy disk0:/abc.pcap scp://root@1.2.3.4:/home/ubuntu/.
#######################
# access-list geneve extended permit icmp host 3.101.116.24 host 10.0.1.101
# access-list geneve extended permit tcp host 3.101.116.24 host 10.0.1.101
# access-list geneve extended permit udp host 3.101.116.24 host 10.0.1.101
#######################
# direct vs roundway
# aaa authentication listener http data-interface port www
# ~~~~exclusive~~~~
# object network gwlb-net
#   subnet 20.0.1.0 255.255.255.0
#
# object-group network gwlb
#   network-object object gwlb-net
#
# object-group network metadata
#   network-object host 20.0.1.10
#
# object service http80
#   service tcp destination eq www
#
# nat (data-interface,data-interface) source static gwlb interface destination static interface metadata service http80 http80
#
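# The "[Pytest]TCP:23 is back!" / "[Pytest]UDP:666 is back!" replies come from
# Pytest_server.py, which is not shown in this file. A minimal sketch of what
# such an echo server could look like (a guess from the asserts above, not the
# actual implementation):
#
#   import socket, threading
#
#   def tcp_server():
#       srv = socket.socket()
#       srv.bind(("0.0.0.0", 23)); srv.listen(1)
#       while True:
#           conn, _ = srv.accept()
#           conn.recv(1024)
#           conn.send(b"[Pytest]TCP:23 is back!")
#           conn.close()
#
#   def udp_server():
#       srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#       srv.bind(("0.0.0.0", 666))
#       while True:
#           _, addr = srv.recvfrom(1024)
#           srv.sendto(b"[Pytest]UDP:666 is back!", addr)
#
#   threading.Thread(target=tcp_server, daemon=True).start()
#   udp_server()
#
# Suites are selected via the custom markers, e.g.:
#   pytest -q -s -m geneveASA test_geneve.py    # ASA-path tests only
#   pytest -q -s -m geneveFTD test_geneve.py    # FTD-path tests only
# (markers such as geneveASA/geneveFTD should be registered in pytest.ini to
# silence PytestUnknownMarkWarning)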
7f2d634c3cb024afde0fdde686ce6aac37b32af2
126
py
Python
delphi/utils/__init__.py
mikiec84/delphi
2e517f21e76e334c7dfb14325d25879ddf26d10d
[ "Apache-2.0" ]
25
2018-03-03T11:57:57.000Z
2022-01-16T21:19:54.000Z
delphi/utils/__init__.py
mikiec84/delphi
2e517f21e76e334c7dfb14325d25879ddf26d10d
[ "Apache-2.0" ]
385
2018-02-21T16:52:06.000Z
2022-02-17T07:44:56.000Z
delphi/utils/__init__.py
mikiec84/delphi
2e517f21e76e334c7dfb14325d25879ddf26d10d
[ "Apache-2.0" ]
19
2018-03-20T01:08:11.000Z
2021-09-29T01:04:49.000Z
from delphi.utils.fp import * from delphi.utils.web import * from delphi.utils.shell import * from delphi.utils.misc import *
25.2
32
0.777778
20
126
4.9
0.4
0.408163
0.612245
0.642857
0
0
0
0
0
0
0
0
0.126984
126
4
33
31.5
0.890909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
7f7bc98efc6c3ca51152633804861870022f31d3
4,569
py
Python
prediction_service/generate_economic_indicator.py
CheranMahalingam/Forex_Sentiment_Analysis
3c5d91d21bd908e6bc0585c50583fa9f0bd4939c
[ "MIT" ]
6
2021-05-16T22:04:53.000Z
2022-02-02T16:45:01.000Z
prediction_service/generate_economic_indicator.py
bytjn1416124/Forex_Technical_Analysis_Platform
3c5d91d21bd908e6bc0585c50583fa9f0bd4939c
[ "MIT" ]
null
null
null
prediction_service/generate_economic_indicator.py
bytjn1416124/Forex_Technical_Analysis_Platform
3c5d91d21bd908e6bc0585c50583fa9f0bd4939c
[ "MIT" ]
6
2021-04-27T04:30:42.000Z
2022-02-06T08:46:25.000Z
from fred import Fred
from dotenv import load_dotenv
import os

import series_id_defs

load_dotenv()
fr = Fred(api_key=os.getenv("FRED_API_KEY"), response_type="json")

# Map country names to their series-ID tables so the three generators can share
# one lookup instead of repeating the same if/elif chain per indicator.
COUNTRY_DEFS = {
    "Switzerland": series_id_defs.SWITZERLAND,
    "Euro": series_id_defs.EURO,
    "Canada": series_id_defs.CANADA,
    "USA": series_id_defs.USA,
    "Australia": series_id_defs.AUSTRALIA,
    "Japan": series_id_defs.JAPAN,
    "UK": series_id_defs.UK,
    "New Zealand": series_id_defs.NEW_ZEALAND,
}


def _fetch_indicator(start_date, country, indicator_key, indicator_name):
    """Fetch one FRED series for a country and return [{'date', 'value'}, ...]."""
    try:
        series_id = COUNTRY_DEFS[country][indicator_key]
    except KeyError:
        raise ValueError(f"No {indicator_name} data for {country}")
    params = {"observation_start": start_date}
    indicator_data = fr.series.observations(series_id, params=params)
    return [
        {"date": obs["date"], "value": obs["value"]}
        for obs in indicator_data["observations"]
    ]


def generate_interest_rate(start_date, country):
    return _fetch_indicator(start_date, country, "interest_rate_id", "interest rate")


def generate_cpi(start_date, country):
    return _fetch_indicator(start_date, country, "cpi_id", "cpi")


def generate_gdp(start_date, country):
    return _fetch_indicator(start_date, country, "gdp_id", "gdp")
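# A minimal usage sketch for the generators above (not part of the original
# module). It assumes FRED_API_KEY is present in the environment or .env file,
# that series_id_defs is importable, and that FRED's usual YYYY-MM-DD date
# format applies.
if __name__ == "__main__":
    rates = generate_interest_rate("2015-01-01", "Canada")
    print(rates[:3])  # e.g. [{'date': '2015-01-01', 'value': '...'}, ...]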
45.237624
110
0.698402
576
4,569
5.28125
0.092014
0.128205
0.098619
0.16568
0.902367
0.898422
0.884615
0.823471
0.823471
0.823471
0
0
0.171591
4,569
100
111
45.69
0.803699
0
0
0.466667
1
0
0.128912
0
0
0
0
0
0
1
0.033333
false
0
0.044444
0
0.111111
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
7f893156fbd05cb994570d84dd9cdc79907e9f89
110
py
Python
ecpy/__init__.py
andynuma/time-release-encryption-py3
a5c48d07fae8121b59100d4cd79d3e38402d928c
[ "MIT" ]
48
2016-03-30T07:20:49.000Z
2022-01-27T10:48:43.000Z
ecpy/__init__.py
andynuma/time-release-encryption-py3
a5c48d07fae8121b59100d4cd79d3e38402d928c
[ "MIT" ]
11
2017-03-26T11:03:20.000Z
2021-06-01T15:54:03.000Z
ecpy/__init__.py
andynuma/time-release-encryption-py3
a5c48d07fae8121b59100d4cd79d3e38402d928c
[ "MIT" ]
12
2016-06-05T19:09:26.000Z
2021-04-18T04:23:20.000Z
from ecpy.fields import *
from ecpy.elliptic_curve import *
from ecpy.utils import *
from ecpy.rings import *
22
33
0.781818
17
110
5
0.470588
0.376471
0.494118
0
0
0
0
0
0
0
0
0
0.145455
110
4
34
27.5
0.904255
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
f69d35bf23fb4259666188959b775d321fd9ec50
24,865
py
Python
111.py
uburok/megalohobot
452421af0c3ee17fce0ad565ff72cb281da55e1a
[ "MIT" ]
1
2022-02-15T10:17:36.000Z
2022-02-15T10:17:36.000Z
111.py
uburok/megalohobot
452421af0c3ee17fce0ad565ff72cb281da55e1a
[ "MIT" ]
null
null
null
111.py
uburok/megalohobot
452421af0c3ee17fce0ad565ff72cb281da55e1a
[ "MIT" ]
null
null
null
# Captured Telegram Bot API payloads (pyTelegramBotAPI / telebot), kept as
# reference data: an incoming update, a private chat, a supergroup chat, one
# wrapped message dump, and the supergroup's administrator list.

mes = {'update_id': 895937467,
       'message': {'message_id': 19033, 'date': 1618345328,
                   'chat': {'id': -1001165785711, 'type': 'supergroup', 'title': '🅐🅝🅤🅢 - беспредел пипидино'},
                   'text': '/say Вартас молодец',
                   'entities': [{'type': 'bot_command', 'offset': 0, 'length': 4},
                                {'type': 'text_mention', 'offset': 5, 'length': 6,
                                 'user': {'id': 1346238115, 'first_name': 'Vartas', 'is_bot': False}}],
                   'caption_entities': [], 'photo': [], 'new_chat_members': [], 'new_chat_photo': [],
                   'delete_chat_photo': False, 'group_chat_created': False,
                   'supergroup_chat_created': False, 'channel_chat_created': False,
                   'from': {'id': 52351769, 'first_name': 'Andrey', 'is_bot': False,
                            'last_name': 'Burmistrov', 'username': 'aburmistrov', 'language_code': 'ru'}}}

private = {'id': 52351769, 'type': 'private', 'title': None, 'username': 'aburmistrov',
           'first_name': 'Andrey', 'last_name': 'Burmistrov',
           'photo': {'small_file_id': 'AQADAgADt6cxGxnTHgMACQ2DKgAEAgADGdMeAwAE4ea1OK4VaCV41gEAAR4E',
                     'small_file_unique_id': 'AQAEDYMqAAR41gEAAQ',
                     'big_file_id': 'AQADAgADt6cxGxnTHgMACQ2DKgAEAwADGdMeAwAE4ea1OK4VaCV61gEAAR4E',
                     'big_file_unique_id': 'AQAEDYMqAAR61gEAAQ'},
           'bio': None, 'description': None, 'invite_link': None, 'pinned_message': None,
           'permissions': None, 'slow_mode_delay': None, 'sticker_set_name': None,
           'can_set_sticker_set': None, 'linked_chat_id': None, 'location': None}

chat = {'id': -1001165785711, 'type': 'supergroup', 'title': '🅐🅝🅤🅢 - беспредел пипидино',
        'username': None, 'first_name': None, 'last_name': None,
        'photo': {'small_file_id': 'AQADAgATNR8imy4AAwIAA5F13uUW____s5iYCA7I0kHtSwMAAR4E',
                  'small_file_unique_id': 'AQADNR8imy4AA-1LAwAB',
                  'big_file_id': 'AQADAgATNR8imy4AAwMAA5F13uUW____s5iYCA7I0kHvSwMAAR4E',
                  'big_file_unique_id': 'AQADNR8imy4AA-9LAwAB'},
        'bio': None, 'description': None, 'invite_link': 'https://t.me/joinchat/VDeyY7m4JtAxOGYy',
        'pinned_message': None, 'permissions': None, 'slow_mode_delay': 3600,
        'sticker_set_name': None, 'can_set_sticker_set': None, 'linked_chat_id': None, 'location': None}

###########################

# Message as seen through telebot's Message wrapper; the 'entities' value was an
# object repr in the dump, quoted here so the literal stays valid Python.
message_dump = {
    'content_type': 'text', 'id': 16953, 'message_id': 16953,
    'from_user': {'id': 52351769, 'is_bot': False, 'first_name': 'Andrey', 'username': 'aburmistrov',
                  'last_name': 'Burmistrov', 'language_code': 'ru', 'can_join_groups': None,
                  'can_read_all_group_messages': None, 'supports_inline_queries': None},
    'date': 1616950940,
    'chat': {'id': -1001165785711, 'type': 'supergroup', 'title': '🅐🅝🅤🅢 - беспредел пипидино',
             'username': None, 'first_name': None, 'last_name': None, 'photo': None, 'bio': None,
             'description': None, 'invite_link': None, 'pinned_message': None, 'permissions': None,
             'slow_mode_delay': None, 'sticker_set_name': None, 'can_set_sticker_set': None,
             'linked_chat_id': None, 'location': None},
    'forward_from': None, 'forward_from_chat': None, 'forward_from_message_id': None,
    'forward_signature': None, 'forward_sender_name': None, 'forward_date': None,
    'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None,
    'text': 'Вартас пёс (тест)',
    'entities': ['<telebot.types.MessageEntity object at 0x000001E6694EBD60>'],
    'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': None,
    'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None,
    'location': None, 'venue': None, 'animation': None, 'dice': None, 'new_chat_member': None,
    'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None,
    'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None,
    'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None,
    'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None,
    'reply_markup': None,
    'json': {'message_id': 16953,
             'from': {'id': 52351769, 'is_bot': False, 'first_name': 'Andrey',
                      'last_name': 'Burmistrov', 'username': 'aburmistrov', 'language_code': 'ru'},
             'chat': {'id': -1001165785711, 'title': '🅐🅝🅤🅢 - беспредел пипидино', 'type': 'supergroup'},
             'date': 1616950940, 'text': 'Вартас пёс (тест)',
             'entities': [{'offset': 0, 'length': 6, 'type': 'text_mention',
                           'user': {'id': 1346238115, 'is_bot': False, 'first_name': 'Vartas'}}]},
}

##########################

# Administrator records for the supergroup. Every record shares the defaults
# below; only the user fields, custom_title, status, and a few permission
# flags differ per entry.
_USER_DEFAULTS = {'is_bot': False, 'last_name': None, 'username': None, 'language_code': None,
                  'can_join_groups': None, 'can_read_all_group_messages': None,
                  'supports_inline_queries': None}
_ADMIN_DEFAULTS = {'status': 'administrator', 'custom_title': None, 'can_be_edited': False,
                   'can_post_messages': None, 'can_edit_messages': None, 'can_delete_messages': True,
                   'can_restrict_members': True, 'can_promote_members': False, 'can_change_info': True,
                   'can_invite_users': True, 'can_pin_messages': True, 'is_member': None,
                   'can_send_messages': None, 'can_send_media_messages': None, 'can_send_polls': None,
                   'can_send_other_messages': None, 'can_add_web_page_previews': None, 'until_date': None}


def _member(user_fields, **overrides):
    return {**_ADMIN_DEFAULTS, 'user': {**_USER_DEFAULTS, **user_fields}, **overrides}


admins = [
    _member({'id': 1775365464, 'is_bot': True, 'first_name': 'megalohobot', 'username': 'megalohobot'}),
    _member({'id': 889248, 'first_name': 'Karkyshka', 'username': 'Karkyshka'}),
    _member({'id': 1577503305, 'first_name': 'Zeborka', 'last_name': 'Zeborka'}, can_promote_members=True),
    _member({'id': 1057783333, 'first_name': 'Ush', 'username': 'A_Ushakoff', 'last_name': 'Aleksey'},
            custom_title='Админ', can_promote_members=True),
    _member({'id': 1560745599, 'first_name': 'Иван', 'last_name': 'Аккуратов'},
            custom_title='Burning Man', can_promote_members=True),
    _member({'id': 1539854471, 'first_name': 'Maxim'}, custom_title='Шарик', can_promote_members=True),
    _member({'id': 1599686606, 'first_name': 'Андрей'}, custom_title='Архивариус'),
    _member({'id': 1586530719, 'first_name': 'Evgeniy'}),
    _member({'id': 747091323, 'first_name': 'Светлана'}, can_promote_members=True),
    _member({'id': 742508167, 'first_name': '83ND3R'}, can_promote_members=True),
    _member({'id': 535078107, 'first_name': 'Оксана', 'username': 'Coffeewithoutmilk', 'language_code': 'ru'},
            custom_title='Хомяк', can_promote_members=True),
    _member({'id': 993222503, 'first_name': 'DARYAL'}),
    _member({'id': 1346238115, 'first_name': 'Vartas'}, custom_title='Пёс', can_promote_members=True),
    _member({'id': 437258746, 'first_name': 'Eva'}),
    _member({'id': 364642280, 'first_name': 'lera'}),
    _member({'id': 442181816, 'first_name': 'Alexandr', 'last_name': 'Gorodnichev'}, can_promote_members=True),
    _member({'id': 240470046, 'first_name': 'Maxx', 'username': 'pub_maxx'}, can_promote_members=True),
    _member({'id': 375423828, 'first_name': 'A', 'last_name': 'Ush'}, can_promote_members=True),
    _member({'id': 312779364, 'first_name': 'Morbo', 'last_name': 'Morbo'}, can_promote_members=True),
    _member({'id': 274448416, 'first_name': 'Boris', 'username': 'Bobrobelka', 'last_name': 'Pavlov'},
            can_promote_members=True),
    _member({'id': 199361131, 'first_name': 'Aleksey', 'username': 'AlekseyLopatin', 'last_name': 'Lopatin'},
            can_promote_members=True),
    _member({'id': 129079647, 'first_name': 'РыБо', 'last_name': 'РыБо'}, can_promote_members=True),
    _member({'id': 64094289, 'first_name': 'slalus', 'username': 'slalus'},
            custom_title='Mr.Breaking news', can_promote_members=True),
    _member({'id': 52351769, 'first_name': 'Andrey', 'username': 'aburmistrov',
             'last_name': 'Burmistrov', 'language_code': 'ru'},
            status='creator', can_be_edited=None, can_delete_messages=None, can_restrict_members=None,
            can_change_info=None, can_invite_users=None, can_pin_messages=None),
    _member({'id': 5577509, 'first_name': 'Da Tek Shaman', 'username': 'DaTekShaman'}, can_promote_members=True),
]
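# A short sketch (not from the original file) showing how the command and the
# mentioned user can be read back out of a raw update dict shaped like `mes`.
# telebot normally exposes these as object attributes, so this applies only to
# plain-dict dumps like the ones above.
_msg = mes['message']
for _entity in _msg['entities']:
    _piece = _msg['text'][_entity['offset']:_entity['offset'] + _entity['length']]
    if _entity['type'] == 'bot_command':
        print('command:', _piece)                                    # -> /say
    elif _entity['type'] == 'text_mention':
        print('mentions:', _entity['user']['first_name'], '->', _piece)  # -> Vartas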
95.634615
748
0.665272
3,093
24,865
4.921436
0.073392
0.115425
0.124162
0.06241
0.8311
0.796807
0.790829
0.790829
0.778873
0.770004
0
0.022502
0.176071
24,865
259
749
96.003861
0.719725
0
0
0.681452
0
0
0.54307
0.137173
0
0
0.000733
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
12069e90e10a7e4ad196d74c5df74b8f3200b0a1
203
py
Python
PythonClient/reinforcement_learning/airgym/envs/__init__.py
zewuzheng17/Carintercept
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
[ "MIT" ]
null
null
null
PythonClient/reinforcement_learning/airgym/envs/__init__.py
zewuzheng17/Carintercept
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
[ "MIT" ]
null
null
null
PythonClient/reinforcement_learning/airgym/envs/__init__.py
zewuzheng17/Carintercept
58a18ac84631fa03ec245dcdefdcc0ead6f84d67
[ "MIT" ]
1
2022-03-22T06:16:25.000Z
2022-03-22T06:16:25.000Z
from airgym.envs.airsim_env import AirSimEnv
from airgym.envs.car_env import AirSimCarEnv
from airgym.envs.drone_env import AirSimDroneEnv
from airgym.envs.car_intercept_env import AirSimCarInterceptEnv
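# One common way such environments are exposed is via gym registration; this
# sketch is illustrative only: the id string is hypothetical, and the env
# classes above may require constructor kwargs that are not shown here.
from gym.envs.registration import register

register(
    id="airsim-car-intercept-v0",  # hypothetical id, not taken from this repo
    entry_point="airgym.envs:AirSimCarInterceptEnv",
)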
40.6
63
0.881773
29
203
6
0.448276
0.229885
0.321839
0.195402
0
0
0
0
0
0
0
0
0.078818
203
4
64
50.75
0.930481
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
1267506b24c924d9118edd01fef0e85fc660aa58
28,685
py
Python
lib/utils_cell_auto.py
octaviomtz/Growing-Neural-Cellular-Automata
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
[ "MIT" ]
null
null
null
lib/utils_cell_auto.py
octaviomtz/Growing-Neural-Cellular-Automata
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
[ "MIT" ]
null
null
null
lib/utils_cell_auto.py
octaviomtz/Growing-Neural-Cellular-Automata
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
[ "MIT" ]
null
null
null
# import cv2
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from IPython.display import Image, HTML, clear_output
import matplotlib
import io
import sys


def correct_label_in_plot(model):
    '''get a string with the network architecture to print in the figure'''
    # https://www.kite.com/python/answers/how-to-redirect-print-output-to-a-variable-in-python
    old_stdout = sys.stdout
    new_stdout = io.StringIO()
    sys.stdout = new_stdout
    print(model)
    output = new_stdout.getvalue()
    sys.stdout = old_stdout
    model_str = [i.split(', k')[0] for i in output.split('\n')]
    model_str_layers = [i.split(':')[-1] for i in model_str[2:-3]]
    model_str = [model_str[0]] + model_str_layers
    model_str = str(model_str).replace("', '", '\n')
    return model_str


def create_sobel_and_identity(device='cuda'):
    ident = torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]).to(device)
    sobel_x = (torch.tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]]) / 8.0).to(device)
    lap = (torch.tensor([[1.0, 2.0, 1.0], [2.0, -12, 2.0], [1.0, 2.0, 1.0]]) / 16.0).to(device)
    return ident, sobel_x, lap


def prepare_seed(target, this_seed, device, num_channels=16, pool_size=1024):
    # prepare seed
    height, width, _ = np.shape(target)
    seed = np.zeros([1, height, width, num_channels], np.float32)
    for i in range(num_channels - 1):
        seed[:, ..., i + 1] = this_seed
    # Preparing the seed pool
    seed_tensor = torch.tensor(seed).permute(0, -1, 1, 2).to(device)
    seed_pool = torch.repeat_interleave(seed_tensor, repeats=pool_size, dim=0)
    return seed, seed_tensor, seed_pool


def epochs_in_inner_loop(i, inner_iter_aux=0, inner_iter=0, thresh_do_nothing=100,
                         thresh_do_something=200, increase=10, inner_iter_max=100):
    if i < thresh_do_nothing:
        inner_iter = 100
    elif i % thresh_do_something == 0:
        inner_iter_aux = inner_iter_aux + increase
        inner_iter = np.min([inner_iter_aux, inner_iter_max])
    # otherwise inner_iter is left unchanged
    return inner_iter, inner_iter_aux


def plot_loss_and_lesion_synthesis(losses, optimizer, model_str, i, loss, sample_size, out, no_plot=False):
    lr_info = f'\nlr_init={optimizer.param_groups[0]["initial_lr"]:.1E}\nlr_last={optimizer.param_groups[0]["lr"]:.1E}'
    model_str_final = model_str + lr_info
    if no_plot:
        return model_str_final
    clear_output(True)
    f, (ax0, ax1) = plt.subplots(2, 1, figsize=(12, 10), gridspec_kw={'height_ratios': [4, 1]})
    ax0.plot(losses, label=model_str_final)
    ax0.set_yscale('log')
    ax0.legend(loc='upper right', fontsize=16)
    stack = []
    for z in range(sample_size):
        stack.append(to_rgb(out[z].permute(-2, -1, 0).cpu().detach().numpy()))
    ax1.imshow(np.clip(np.hstack(np.squeeze(stack)), 0, 1))
    ax1.axis('off')
    plt.show()
    print(i, loss.item(), flush=True)
    return model_str_final


def to_rgb(img, channel=1):
    '''return visible channel'''
    # rgb, a = img[:,:,:1], img[:,:,1:2]
    rgb, a = img[:, :, :channel], img[:, :, channel:channel + 1]
    return 1.0 - a + rgb


class CeA_BASE(nn.Module):
    # The CeA_* variants below share this implementation; they differ only in
    # their defaults, in the depth of the update network, and in whether the
    # clamp mask modulates the update (see `apply_mask_clamp`).
    apply_mask_clamp = False

    def __init__(self, checkpoint=None, seq_layers=None, device='cuda',
                 grow_on_k_iter=1, background_intensity=.19, step_size=1,
                 scale_mask=1, pretrain_thres=100, ch0_1=1, ch1_16=16,
                 alive_thresh=0.1):
        '''
        Kind of a modular class for a CA model
        args:
            checkpoint = 'path/to/model.pt'
            seq_layers = nn.Sequential(your, pytorch, layers)
            device = 'cuda' or 'cpu'
        '''
        super().__init__()
        self.ident = torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]).to(device)
        self.sobel_x = (torch.tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]]) / 8.0).to(device)
        self.lap = (torch.tensor([[1.0, 2.0, 1.0], [2.0, -12, 2.0], [1.0, 2.0, 1.0]]) / 16.0).to(device)
        self.grow_on_k_iter = grow_on_k_iter
        self.background_intensity = background_intensity
        self.step_size = step_size
        self.scale_mask = scale_mask
        self.pretrain_thres = pretrain_thres
        self.ch0_1 = ch0_1
        self.ch1_16 = ch1_16
        self.alive_thresh = alive_thresh
        self.model = seq_layers if seq_layers is not None else self.default_model()
        # initial condition for "do nothing" behaviour:
        # * all biases should be zero
        # * the weights of the last layer should be zero
        for l in range(len(self.model)):
            if isinstance(self.model[l], nn.Conv2d):
                self.model[l].bias.data.fill_(0)
                if l == len(self.model) - 1:
                    self.model[l].weight.data.fill_(0)
        if checkpoint is not None:
            self.load_state_dict(torch.load(checkpoint))
        self.to(device=device)

    @staticmethod
    def default_model():
        return nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(256, 16, kernel_size=1, bias=True),
        )

    def perchannel_conv(self, x, filters):
        '''filters: [filter_n, h, w]'''
        b, ch, h, w = x.shape
        y = x.reshape(b * ch, 1, h, w)
        y = torch.nn.functional.pad(y, [1, 1, 1, 1], 'circular')
        y = torch.nn.functional.conv2d(y, filters[:, None])
        return y.reshape(b, -1, h, w)

    def perception(self, x):
        filters = torch.stack([self.ident, self.sobel_x, self.sobel_x.T, self.lap])
        return self.perchannel_conv(x, filters)

    def normalize_grads(self):
        '''gradient normalization for constant step size and to avoid spikes'''
        for p in self.parameters():
            p.grad.data = p.grad.data / (p.grad.data.norm() + 1e-8)

    def get_alive_mask(self, x):
        '''looks for cells whose alpha channel is over the alive threshold, and
        allows only their adjacent cells to participate in growth'''
        alpha = x[:, 1:2, :, :]
        pooled = (F.max_pool2d(alpha, 3, 1, padding=1) > self.alive_thresh).float()
        return pooled

    def train_step(self, seed, target, target_loss_func, epochs_inside,
                   epoch_outside=1000, masked_loss=False):
        '''a single training step for the model; feel free to play around with
        different loss functions like L1 loss. The loss is calculated for only
        the first channels of the output.'''
        x = seed
        for epoch_in in range(epochs_inside):
            x, alive_mask, other = self.forward(x, epoch_in, epoch_outside)
        if masked_loss:
            alive_mask_dilated = (F.max_pool2d(alive_mask[0], 3, 1, padding=1) > 0.1).float()
            target_loss = target_loss_func(x[:, :1, :, :] * alive_mask_dilated,
                                           target * alive_mask_dilated)
        else:
            # target_loss = target_loss_func(x[:,:2,:,:] * target[:,1:,...], target * target[:,1:,...])  # used to synthesize almost all nodules
            target_loss = target_loss_func(x[:, :2, :, :], target)  # ORIGINAL
        loss = target_loss
        return loss, x, alive_mask.cpu().numpy(), other.detach().cpu().numpy()

    def forward(self, x, epoch_in, epoch_outside):
        '''nice little forward function for the model
        1. fetches an alive mask
        2. generates another random mask of 0's and 1's
        3. updates the input
        4. applies alive mask
        '''
        mask_previous = alive_mask = (x[:, 1:2, :, :] > self.alive_thresh).float()
        # self_pretraining
        if epoch_outside < self.pretrain_thres:
            alive_mask = self.get_alive_mask(x)
        else:
            if epoch_in % self.grow_on_k_iter == 0:
                alive_mask = self.get_alive_mask(x)
            else:
                alive_mask = (x[:, 1:2, :, :] > self.alive_thresh).float()
                mask_previous = torch.zeros_like(alive_mask)  # OMM added in CeA
        # MASK CLAMP
        # | = self.background_intensity
        # X = self.step_size
        # S = self.scale_mask
        #      ch0            ch1           ch2          ...
        # |||||||||||||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # |||||XXXX||||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # |||XX||||XX||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # ||XX||||||XX|| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # |||XX||||XX||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # |||||XXXX||||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        # |||||||||||||| SSSSSSSSSSSSS SSSSSSSSSSSSS SSSSSSSSSSSSS
        mask_diff = alive_mask - mask_previous
        mask_clamp_ch0 = torch.clip((1 - mask_diff) + self.background_intensity, 0, self.step_size)  # only applied to the first channel
        mask_clamp = torch.repeat_interleave(mask_clamp_ch0, 16, 1)
        mask_clamp_ones = torch.ones_like(torch.squeeze(mask_clamp_ch0)) * self.scale_mask
        for idx_channel in np.arange(self.ch0_1, self.ch1_16, 1):
            mask_clamp[:, idx_channel, :, :] = mask_clamp_ones
        mask = torch.clamp(torch.round(torch.rand_like(x[:, :1, :, :])), 0, 1)
        P = self.perception(x)
        Y = self.model(P)
        if self.apply_mask_clamp:
            out = x + (Y * mask * mask_clamp)
        else:
            out = x + (Y * mask)  # original update, without the clamp
        out *= alive_mask
        return out, alive_mask, mask_clamp


class CeA_00(CeA_BASE):
    '''CeA variant whose update is modulated by the clamp mask; grows every
    grow_on_k_iter=3 inner iterations by default.'''
    apply_mask_clamp = True

    def __init__(self, checkpoint=None, seq_layers=None, device='cuda',
                 grow_on_k_iter=3, background_intensity=.19, step_size=1,
                 scale_mask=1, pretrain_thres=100):
        super().__init__(checkpoint=checkpoint, seq_layers=seq_layers, device=device,
                         grow_on_k_iter=grow_on_k_iter,
                         background_intensity=background_intensity,
                         step_size=step_size, scale_mask=scale_mask,
                         pretrain_thres=pretrain_thres)


class CeA_0x(CeA_BASE):
    '''CeA variant with configurable alive threshold and clamped channel range
    (ch0_1..ch1_16); the clamp mask modulates the update.'''
    apply_mask_clamp = True

    def __init__(self, checkpoint=None, seq_layers=None, device='cuda',
                 grow_on_k_iter=3, background_intensity=.19, step_size=1,
                 scale_mask=1, pretrain_thres=100, ch0_1=1, ch1_16=16,
                 alive_thresh=0.1):
        super().__init__(checkpoint=checkpoint, seq_layers=seq_layers, device=device,
                         grow_on_k_iter=grow_on_k_iter,
                         background_intensity=background_intensity,
                         step_size=step_size, scale_mask=scale_mask,
                         pretrain_thres=pretrain_thres, ch0_1=ch0_1,
                         ch1_16=ch1_16, alive_thresh=alive_thresh)


class CeA_BASE_1CNN(CeA_BASE):
    '''Same behaviour as CeA_BASE but with a single hidden convolution in the
    update network.'''

    @staticmethod
    def default_model():
        return nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(256, 16, kernel_size=1, bias=True),
        )
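# A minimal training-loop sketch for the classes above (not from the original
# file). The target shape, loss function, learning rate, and pool size are
# assumptions chosen only to show the call pattern of prepare_seed /
# train_step / normalize_grads / epochs_in_inner_loop.
if __name__ == "__main__":
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = CeA_BASE(device=device)
    target_np = np.zeros((64, 64, 2), np.float32)    # hypothetical 2-channel target
    target_np[28:36, 28:36, :] = 1.0                 # a small square to grow towards
    _, seed_tensor, _ = prepare_seed(target_np, this_seed=1.0, device=device, pool_size=8)
    target = torch.tensor(target_np).permute(2, 0, 1)[None].to(device)  # -> [1, 2, 64, 64]
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    inner_iter = inner_iter_aux = 0
    for i in range(3):  # a few outer epochs, just to exercise the API
        inner_iter, inner_iter_aux = epochs_in_inner_loop(i, inner_iter_aux, inner_iter)
        loss, out, alive, clamp = model.train_step(seed_tensor, target, F.mse_loss,
                                                   epochs_inside=inner_iter, epoch_outside=i)
        optimizer.zero_grad()
        loss.backward()
        model.normalize_grads()   # constant-step-size trick defined above
        optimizer.step()
        print(i, float(loss))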
42.433432
207
0.583406
4,012
28,685
3.99651
0.083998
0.013097
0.014968
0.013721
0.888238
0.884932
0.884932
0.884932
0.884932
0.883061
0
0.041118
0.279345
28,685
676
208
42.433432
0.73452
0.222486
0
0.797927
0
0.005181
0.014393
0.009853
0
0
0
0
0
1
0.088083
false
0
0.025907
0
0.194301
0.005181
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d6024711dd06350b8dfa6bed9c76299d11f94eb4
12,005
py
Python
site/churchcal/migrations/0001_initial.py
scottBowles/dailyoffice2019
ca750ac77316d247ca7a7a820e085f9968fbc8ff
[ "MIT" ]
19
2020-01-12T23:57:22.000Z
2022-03-30T16:35:17.000Z
site/churchcal/migrations/0001_initial.py
scottBowles/dailyoffice2019
ca750ac77316d247ca7a7a820e085f9968fbc8ff
[ "MIT" ]
59
2020-01-13T00:45:27.000Z
2022-02-20T04:10:05.000Z
site/churchcal/migrations/0001_initial.py
scottBowles/dailyoffice2019
ca750ac77316d247ca7a7a820e085f9968fbc8ff
[ "MIT" ]
7
2020-01-21T21:12:03.000Z
2021-10-24T01:15:50.000Z
# Generated by Django 2.1.5 on 2019-09-26 02:17
from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Calendar",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(max_length=256)),
                ("year", models.CharField(max_length=256)),
                ("abbreviation", models.CharField(max_length=256)),
                ("google_sheet_id", models.CharField(max_length=256)),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Commemoration",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(max_length=256)),
                ("color", models.CharField(blank=True, max_length=256, null=True)),
                ("additional_color", models.CharField(blank=True, max_length=256, null=True)),
                ("alternate_color", models.CharField(blank=True, max_length=256, null=True)),
                ("alternate_color_2", models.CharField(blank=True, max_length=256, null=True)),
                ("collect", models.TextField(blank=True, null=True)),
                ("alternate_collect", models.TextField(blank=True, null=True)),
                ("eve_collect", models.TextField(blank=True, null=True)),
                ("color_notes", models.CharField(blank=True, max_length=256, null=True)),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="CommemorationRank",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(max_length=256)),
                ("formatted_name", models.CharField(max_length=256)),
                (
                    "precedence_rank",
                    models.PositiveSmallIntegerField(
                        choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)]
                    ),
                ),
                ("required", models.BooleanField(default=True)),
                ("calendar", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="churchcal.Calendar")),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Denomination",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(max_length=256)),
                ("abbreviation", models.CharField(max_length=256)),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Proper",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                (
                    "number",
                    models.IntegerField(
                        choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10),
                                 (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18),
                                 (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26),
                                 (27, 27), (28, 28)]
                    ),
                ),
                ("start_date", models.DateField()),
                ("end_date", models.DateField()),
                ("collect", models.TextField(blank=True, null=True)),
                ("calendar", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="churchcal.Calendar")),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Season",
            fields=[
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                (
                    "order",
                    models.IntegerField(
                        choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10),
                                 (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18),
                                 (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26),
                                 (27, 27), (28, 28)]
                    ),
                ),
                ("name", models.CharField(max_length=1024)),
                ("color", models.CharField(max_length=255)),
                ("alternate_color", models.CharField(blank=True, max_length=255, null=True)),
                ("calendar", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="churchcal.Calendar")),
                (
                    "rank",
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="churchcal.CommemorationRank"),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="FerialCommemoration",
            fields=[
                (
                    "commemoration_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="churchcal.Commemoration",
                    ),
                )
            ],
            options={"managed": False},
            bases=("churchcal.commemoration",),
        ),
        migrations.CreateModel(
            name="SanctoraleBasedCommemoration",
            fields=[
                (
                    "commemoration_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="churchcal.Commemoration",
                    ),
                ),
                ("weekday", models.CharField(max_length=256)),
                ("number_after", models.SmallIntegerField()),
                ("month_after", models.PositiveSmallIntegerField()),
                ("day_after", models.PositiveSmallIntegerField()),
            ],
            options={"abstract": False},
            bases=("churchcal.commemoration",),
        ),
        migrations.CreateModel(
            name="SanctoraleCommemoration",
            fields=[
                (
                    "commemoration_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="churchcal.Commemoration",
                    ),
                ),
                ("month", models.PositiveSmallIntegerField()),
                ("day", models.PositiveSmallIntegerField()),
            ],
            options={"abstract": False},
            bases=("churchcal.commemoration",),
        ),
        migrations.CreateModel(
            name="TemporaleCommemoration",
            fields=[
                (
                    "commemoration_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="churchcal.Commemoration",
                    ),
                ),
                ("days_after_easter", models.SmallIntegerField()),
            ],
            options={"abstract": False},
            bases=("churchcal.commemoration",),
        ),
        migrations.AddField(
            model_name="season",
            name="start_commemoration",
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="churchcal.Commemoration"
            ),
        ),
        migrations.AddField(
            model_name="commemoration",
            name="calendar",
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="churchcal.Calendar"),
        ),
        migrations.AddField(
            model_name="commemoration",
            name="cannot_occur_after",
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="churchcal.Commemoration"
            ),
        ),
        migrations.AddField(
            model_name="commemoration",
            name="rank",
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="churchcal.CommemorationRank"
            ),
        ),
        migrations.AddField(
            model_name="calendar",
            name="denomination",
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="churchcal.Denomination"
            ),
        ),
    ]
41.539792
118
0.431653
906
12,005
5.607064
0.149007
0.05315
0.035433
0.06063
0.81752
0.806693
0.770866
0.719291
0.710039
0.692717
0
0.04273
0.448313
12,005
288
119
41.684028
0.724294
0.003748
0
0.75089
1
0
0.104365
0.031694
0
0
0
0
0
1
0
false
0
0.010676
0
0.024911
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
d614de9215ac6fa460c971679c56c873d0a89e7a
13,263
py
Python
run.py
jtuyls/variational-autoencoder
4db193b439a0c4630677ad15ba3b68f8287e17f5
[ "MIT" ]
null
null
null
run.py
jtuyls/variational-autoencoder
4db193b439a0c4630677ad15ba3b68f8287e17f5
[ "MIT" ]
null
null
null
run.py
jtuyls/variational-autoencoder
4db193b439a0c4630677ad15ba3b68f8287e17f5
[ "MIT" ]
null
null
null
import os

from visualization import Visualization
from variational_autoencoder import VariationalAutoEncoder
from vae_convnet import VaeConvNet
from vae_ffnn import VaeFfnn
from vae_convnet2 import VaeConvNet2
from vae_input_output import VaeInputOutput

scenario = 1.4

if __name__ == "__main__":

    if scenario == 0:
        # Test scenario
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_0'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet2(visualization=viz)
        vae.main(data_set="mnist", n_latent=20, num_epochs=2, batch_size=100, downsampling=100)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    ###############
    #### MNIST ####
    ###############

    if scenario == 1.0:
        # Test scenario
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="mnist", n_latent=20, num_epochs=10, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    #### n_latent == 2 ####

    if scenario == 1.1:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_1'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="mnist", n_latent=2, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    if scenario == 1.2:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_2'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="mnist", n_latent=2, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    #### n_latent == 20 ####

    if scenario == 1.3:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_3'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="mnist", n_latent=20, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    if scenario == 1.4:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_4'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="mnist", n_latent=20, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    #### n_latent == 200 ####

    if scenario == 1.5:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_5'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="mnist", n_latent=200, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)

    if scenario == 1.6:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_1_6'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="mnist", n_latent=200, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)

    ###########################
    #### Celebrity dataset ####
    ###########################

    if scenario == 2.0:
        # Test scenario
        # Train standard variational autoencoder and visualize latent layer
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=2, num_epochs=2, batch_size=100, downsampling=100)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    #### n_latent == 2 ####

    if scenario == 2.1:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_1'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=2, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    if scenario == 2.2:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_2'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=2, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    #### n_latent == 20 ####

    if scenario == 2.3:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_3'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=20, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    if scenario == 2.4:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_4'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=20, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    #### n_latent == 200 ####

    if scenario == 2.5:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_5'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=200, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)

    if scenario == 2.6:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_2_6'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="celeb_data", n_latent=200, num_epochs=100, batch_size=100, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)

    ###################
    #### Cell data ####
    ###################

    if scenario == 3.0:
        # Train variational autoencoder with convolutional layers on cell data set
        # with different inputs and outputs
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3'
        viz = Visualization(output_dir=output_dir)
        vae = VaeInputOutput(visualization=viz)
        vae.main(data_set="cell_data", n_latent=20, num_epochs=2, batch_size=100, downsampling=100)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)

    #### n_latent == 2 ####

    if scenario == 3.1:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_1'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="cell_data", n_latent=2, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    if scenario == 3.2:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_2'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="cell_data", n_latent=2, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space_2nlatent()
        vae.visualize_latent_layer_unsupervised()

    #### n_latent == 20 ####

    if scenario == 3.3:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_3'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="cell_data", n_latent=20, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    if scenario == 3.4:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_4'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="cell_data", n_latent=20, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space()

    #### n_latent == 200 ####

    if scenario == 3.5:
        # Train standard variational autoencoder
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_5'
        viz = Visualization(output_dir=output_dir)
        vae = VariationalAutoEncoder(visualization=viz)
        vae.main(data_set="cell_data", n_latent=200, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)

    if scenario == 3.6:
        # Train variational autoencoder with convolutional layers
        output_dir = os.path.dirname(os.path.abspath(__file__)) + '/figures_scenario_3_6'
        viz = Visualization(output_dir=output_dir)
        vae = VaeConvNet(visualization=viz)
        vae.main(data_set="cell_data", n_latent=200, num_epochs=1500, batch_size=32, downsampling=None)
        vae.test_vae(downsampling=10)
        vae.visualize_train_images_original(100)
        vae.construct_images_from_scratch(100)
        vae.visualize_latent_space(nx=20)
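Every scenario block above differs only in model class, dataset, and hyperparameters, so the dispatch can be table-driven. A sketch under that observation; Visualization, VariationalAutoEncoder, and VaeConvNet are the classes run.py already imports, while the table contents and directory-naming scheme are illustrative:

# Sketch (not part of the original repo): table-driven version of the
# scenario dispatch above. Only a few scenario ids are shown.
import os

from visualization import Visualization
from variational_autoencoder import VariationalAutoEncoder
from vae_convnet import VaeConvNet

SCENARIOS = {
    # id: (model class, data_set, n_latent, num_epochs, batch_size)
    1.3: (VariationalAutoEncoder, "mnist", 20, 100, 100),
    1.4: (VaeConvNet, "mnist", 20, 100, 100),
    2.4: (VaeConvNet, "celeb_data", 20, 100, 100),
    3.4: (VaeConvNet, "cell_data", 20, 1500, 32),
}


def run_scenario(scenario_id):
    model_cls, data_set, n_latent, num_epochs, batch_size = SCENARIOS[scenario_id]
    suffix = str(scenario_id).replace(".", "_")  # e.g. 1.4 -> "1_4" (naming approximated)
    output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "figures_scenario_" + suffix)
    viz = Visualization(output_dir=output_dir)
    vae = model_cls(visualization=viz)
    vae.main(data_set=data_set, n_latent=n_latent, num_epochs=num_epochs,
             batch_size=batch_size, downsampling=None)
    vae.test_vae(downsampling=10)
    vae.visualize_train_images_original(100)
    vae.construct_images_from_scratch(100)
    vae.visualize_latent_space()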
46.865724
116
0.6966
1,660
13,263
5.225301
0.046386
0.068481
0.058105
0.038045
0.949389
0.944778
0.939013
0.927715
0.923449
0.923449
0
0.044786
0.19528
13,263
282
117
47.031915
0.767919
0.097188
0
0.723005
0
0
0.054144
0.032231
0
0
0
0
0
1
0
false
0
0.032864
0
0.032864
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d63180f401d45303e4bbe66f6f3162a436f72430
13,929
py
Python
trading-app/abis_and_keys.py
mostafajoma/project-3
60c8e96f33cdc13634d6cd5bb4c3971ff20346c7
[ "Apache-2.0" ]
2
2020-06-03T23:18:32.000Z
2020-06-24T23:15:43.000Z
trading-app/abis_and_keys.py
mostafajoma/PEConnect
60c8e96f33cdc13634d6cd5bb4c3971ff20346c7
[ "Apache-2.0" ]
null
null
null
trading-app/abis_and_keys.py
mostafajoma/PEConnect
60c8e96f33cdc13634d6cd5bb4c3971ff20346c7
[ "Apache-2.0" ]
null
null
null
# Contract address for Sale Deployer owner_address = '<Insert address contract is deployed from>' owner_private_key = '<Insert key to above account>' deployer_contract_address = '<Insert address of Sale Deployer contract>' # ABI for Sale Deployer deployer_abi = '''[ { "inputs": [ { "internalType": "string", "name": "name", "type": "string" }, { "internalType": "string", "name": "symbol", "type": "string" }, { "internalType": "address payable", "name": "wallet", "type": "address" }, { "internalType": "uint256", "name": "goal", "type": "uint256" } ], "payable": false, "stateMutability": "nonpayable", "type": "constructor" }, { "constant": true, "inputs": [], "name": "token_address", "outputs": [ { "internalType": "address", "name": "", "type": "address" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "token_sale_address", "outputs": [ { "internalType": "address", "name": "", "type": "address" } ], "payable": false, "stateMutability": "view", "type": "function" } ]''' coin_abi = '''[ { "inputs": [ { "internalType": "string", "name": "name", "type": "string" }, { "internalType": "string", "name": "symbol", "type": "string" }, { "internalType": "uint256", "name": "initial_supply", "type": "uint256" } ], "payable": false, "stateMutability": "nonpayable", "type": "constructor" }, { "anonymous": false, "inputs": [ { "indexed": true, "internalType": "address", "name": "owner", "type": "address" }, { "indexed": true, "internalType": "address", "name": "spender", "type": "address" }, { "indexed": false, "internalType": "uint256", "name": "value", "type": "uint256" } ], "name": "Approval", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": true, "internalType": "address", "name": "account", "type": "address" } ], "name": "MinterAdded", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": true, "internalType": "address", "name": "account", "type": "address" } ], "name": "MinterRemoved", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": true, "internalType": "address", "name": "from", "type": "address" }, { "indexed": true, "internalType": "address", "name": "to", "type": "address" }, { "indexed": false, "internalType": "uint256", "name": "value", "type": "uint256" } ], "name": "Transfer", "type": "event" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "account", "type": "address" } ], "name": "addMinter", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [ { "internalType": "address", "name": "owner", "type": "address" }, { "internalType": "address", "name": "spender", "type": "address" } ], "name": "allowance", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "spender", "type": "address" }, { "internalType": "uint256", "name": "amount", "type": "uint256" } ], "name": "approve", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [ { "internalType": "address", "name": "account", "type": "address" } ], "name": "balanceOf", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "decimals", 
"outputs": [ { "internalType": "uint8", "name": "", "type": "uint8" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "spender", "type": "address" }, { "internalType": "uint256", "name": "subtractedValue", "type": "uint256" } ], "name": "decreaseAllowance", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "spender", "type": "address" }, { "internalType": "uint256", "name": "addedValue", "type": "uint256" } ], "name": "increaseAllowance", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [ { "internalType": "address", "name": "account", "type": "address" } ], "name": "isMinter", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "account", "type": "address" }, { "internalType": "uint256", "name": "amount", "type": "uint256" } ], "name": "mint", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [], "name": "name", "outputs": [ { "internalType": "string", "name": "", "type": "string" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [], "name": "renounceMinter", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [], "name": "symbol", "outputs": [ { "internalType": "string", "name": "", "type": "string" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "totalSupply", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "recipient", "type": "address" }, { "internalType": "uint256", "name": "amount", "type": "uint256" } ], "name": "transfer", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "sender", "type": "address" }, { "internalType": "address", "name": "recipient", "type": "address" }, { "internalType": "uint256", "name": "amount", "type": "uint256" } ], "name": "transferFrom", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" } ]''' # ABI for Crowdsale sale_abi = '''[ { "inputs": [ { "internalType": "uint256", "name": "rate", "type": "uint256" }, { "internalType": "string", "name": "name", "type": "string" }, { "internalType": "string", "name": "symbol", "type": "string" }, { "internalType": "address payable", "name": "wallet", "type": "address" }, { "internalType": "contract PupperCoin", "name": "token", "type": "address" }, { "internalType": "uint256", "name": "goal", "type": "uint256" }, { "internalType": "uint256", "name": "open", "type": "uint256" }, { "internalType": "uint256", "name": "close", "type": "uint256" } ], "payable": false, 
"stateMutability": "nonpayable", "type": "constructor" }, { "anonymous": false, "inputs": [], "name": "CrowdsaleFinalized", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "internalType": "uint256", "name": "prevClosingTime", "type": "uint256" }, { "indexed": false, "internalType": "uint256", "name": "newClosingTime", "type": "uint256" } ], "name": "TimedCrowdsaleExtended", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": true, "internalType": "address", "name": "purchaser", "type": "address" }, { "indexed": true, "internalType": "address", "name": "beneficiary", "type": "address" }, { "indexed": false, "internalType": "uint256", "name": "value", "type": "uint256" }, { "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" } ], "name": "TokensPurchased", "type": "event" }, { "payable": true, "stateMutability": "payable", "type": "fallback" }, { "constant": true, "inputs": [ { "internalType": "address", "name": "account", "type": "address" } ], "name": "balanceOf", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "beneficiary", "type": "address" } ], "name": "buyTokens", "outputs": [], "payable": true, "stateMutability": "payable", "type": "function" }, { "constant": true, "inputs": [], "name": "cap", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "capReached", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address payable", "name": "refundee", "type": "address" } ], "name": "claimRefund", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [], "name": "closingTime", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [], "name": "finalize", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function" }, { "constant": true, "inputs": [], "name": "finalized", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "goal", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "goalReached", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "hasClosed", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "isOpen", "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "openingTime", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": 
[], "name": "rate", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "token", "outputs": [ { "internalType": "contract IERC20", "name": "", "type": "address" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "wallet", "outputs": [ { "internalType": "address payable", "name": "", "type": "address" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": true, "inputs": [], "name": "weiRaised", "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, { "constant": false, "inputs": [ { "internalType": "address", "name": "beneficiary", "type": "address" } ], "name": "withdrawTokens", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function" } ]'''
15.900685
72
0.495082
1,028
13,929
6.696498
0.093385
0.060721
0.145119
0.103574
0.855462
0.827426
0.805636
0.763074
0.739105
0.718332
0
0.016863
0.267715
13,929
876
73
15.900685
0.658039
0.005313
0
0.638596
0
0
0.989027
0.004837
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
c39ac12d7daaf2faf1d9e8f9ad05d358665528fd
253
py
Python
tests/data/fmtonoff3.py
rbenton/black
2bae41f92ed125f687e0ddef3a5913cda755a64f
[ "MIT" ]
null
null
null
tests/data/fmtonoff3.py
rbenton/black
2bae41f92ed125f687e0ddef3a5913cda755a64f
[ "MIT" ]
null
null
null
tests/data/fmtonoff3.py
rbenton/black
2bae41f92ed125f687e0ddef3a5913cda755a64f
[ "MIT" ]
null
null
null
# fmt: off
x = [
    1, 2,
    3, 4,
]
# fmt: on

# fmt: off
x = [
    1, 2,
    3, 4,
]
# fmt: on

x = [
    1, 2,
    3, 4
]

# output

# fmt: off
x = [
    1, 2,
    3, 4,
]
# fmt: on

# fmt: off
x = [
    1, 2,
    3, 4,
]
# fmt: on

x = [1, 2, 3, 4]
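This record is a black test fixture: everything before the "# output" marker is the input, everything after is the expected result, and the "# fmt: off" / "# fmt: on" pragmas mark regions black must leave untouched. A sketch of exercising such a fixture directly; black.format_str and black.FileMode are black's public API, but the fixture-splitting logic here is illustrative, not black's actual test harness:

# Sketch: format the input half of the fixture and compare to the output half.
import black

source = open("tests/data/fmtonoff3.py").read()
src, expected = source.split("# output", 1)
formatted = black.format_str(src, mode=black.FileMode())
print(formatted.strip() == expected.strip())  # True if black honors the fmt pragmas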
7.027778
18
0.316206
47
253
1.702128
0.191489
0.15
0.225
0.3
0.925
0.925
0.925
0.925
0.925
0.925
0
0.175182
0.458498
253
35
19
7.228571
0.408759
0.29249
0
0.65
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
c3e5f94ce119d23b76806f4351cfffbaffe22774
15,074
py
Python
test/test_information.py
thyamu/Neet
cdc55fdb25700e44bcdb4f496b91d21a61a81c83
[ "MIT" ]
null
null
null
test/test_information.py
thyamu/Neet
cdc55fdb25700e44bcdb4f496b91d21a61a81c83
[ "MIT" ]
null
null
null
test/test_information.py
thyamu/Neet
cdc55fdb25700e44bcdb4f496b91d21a61a81c83
[ "MIT" ]
null
null
null
import numpy as np import unittest from neet.boolean.examples import s_pombe from neet.information import Architecture class TestInformation(unittest.TestCase): """ Test the information analysis module """ def test_canary(self): """ A canary test to ensure the test suite is working """ self.assertEqual(3, 1 + 2) # def test_active_info_not_network(self): # """ # Raise a ``TypeError`` if the provided network is not actually a network # """ # with self.assertRaises(TypeError): # active_information(5, k=3, timesteps=10, local=False) # with self.assertRaises(TypeError): # active_information(5, k=3, timesteps=10, local=True) # # def test_active_info_s_pombe(self): # """ # ``active_information`` computes the correct values for ``s_pombe`` # """ # known_ai = [0.0, 0.408344, 0.629567, 0.629567, 0.379157, 0.400462, # 0.670196, 0.670196, 0.391891] # computed_ai = active_information(s_pombe, k=5, timesteps=20) # self.assertEqual(9, len(computed_ai)) # for got, expected in zip(computed_ai, known_ai): # self.assertAlmostEqual(expected, got, places=6) # # def test_local_active_info_s_pombe(self): # """ # local ``active_information`` averages to the correct values for # ``s_pombe`` # """ # known_ai = [0.0, 0.408344, 0.629567, 0.629567, 0.379157, 0.400462, # 0.670196, 0.670196, 0.391891] # computed_ai = active_information( # s_pombe, k=5, timesteps=20, local=True) # self.assertEqual((9, 512, 16), computed_ai.shape) # for got, expected in zip(computed_ai, known_ai): # self.assertAlmostEqual(expected, np.mean(got), places=6) # # def test_entropy_rate_not_network(self): # """ # Raise a ``TypeError`` if the provided network is not actually a network # """ # with self.assertRaises(TypeError): # entropy_rate(5, k=3, timesteps=10, local=False) # with self.assertRaises(TypeError): # entropy_rate(5, k=3, timesteps=10, local=True) # # def test_entropy_rate_s_pombe(self): # """ # ``entropy_rate`` computes the correct values for ``s_pombe`` # """ # known_er = [0.0, 0.016912, 0.072803, 0.072803, 0.058420, 0.024794, # 0.032173, 0.032173, 0.089669] # computed_er = entropy_rate(s_pombe, k=5, timesteps=20) # self.assertEqual(9, len(computed_er)) # for got, expected in zip(computed_er, known_er): # self.assertAlmostEqual(expected, got, places=6) # # def test_local_entropy_rate_s_pombe(self): # """ # local ``entropy_rate`` averages to the correct values for ``s_pombe`` # """ # known_er = [0.0, 0.016912, 0.072803, 0.072803, 0.058420, 0.024794, # 0.032173, 0.032173, 0.089669] # computed_er = entropy_rate(s_pombe, k=5, timesteps=20, local=True) # self.assertEqual((9, 512, 16), computed_er.shape) # for got, expected in zip(computed_er, known_er): # self.assertAlmostEqual(expected, np.mean(got), places=6) # # def test_transfer_entropy_not_network(self): # """ # Raise a ``TypeError`` if the provided network is not actually a network # """ # with self.assertRaises(TypeError): # transfer_entropy(5, k=3, timesteps=10, local=False) # with self.assertRaises(TypeError): # transfer_entropy(5, k=3, timesteps=10, local=True) # # def test_transfer_entropy_s_pombe(self): # """ # ``transfer_entropy`` computes the correct values for ``s_pombe`` # """ # known_te = np.asarray( # [[0., 0., 0., 0., 0., 0., 0., 0., 0.], # [0., 0., 0., 0., 0.016912, 0., 0., 0., 0.], # [0., 0.051370, 0., 0.012225, 0.019947, # 0.051370, 0.006039, 0.006039, 0.072803], # [0., 0.051370, 0.012225, 0., 0.019947, # 0.051370, 0.006039, 0.006039, 0.072803], # [0., 0.058420, 0.047602, 0.047602, 0., # 0.058420, 0.047602, 0.047602, 0.], # [0., 0., 0.024794, 0.024794, 0., 0., 
0.024794, 0.024794, 0.], # [0., 0.016690, 0.004526, 0.004526, 0.011916, # 0.016690, 0., 0.002983, 0.032173], # [0., 0.016690, 0.004526, 0.004526, 0.011916, # 0.016690, 0.002983, 0., 0.032173], # [0., 0.060304, 0.048289, 0.048289, 0.089669, 0.060304, # 0.048927, 0.048927, 0.]]) # computed_te = transfer_entropy(s_pombe, k=5, timesteps=20) # self.assertEqual(known_te.shape, computed_te.shape) # for got, expected in zip(computed_te.flatten(), known_te.flatten()): # self.assertAlmostEqual(expected, got, places=6) # # def test_local_transfer_entropy_s_pombe(self): # """ # local ``transfer_entropy`` averages to the correct values for # ``s_pombe`` # """ # known_te = np.asarray( # [[0., 0., 0., 0., 0., 0., 0., 0., 0.], # [0., 0., 0., 0., 0.016912, 0., 0., 0., 0.], # [0., 0.051370, 0., 0.012225, 0.019947, # 0.051370, 0.006039, 0.006039, 0.072803], # [0., 0.051370, 0.012225, 0., 0.019947, # 0.051370, 0.006039, 0.006039, 0.072803], # [0., 0.058420, 0.047602, 0.047602, 0., # 0.058420, 0.047602, 0.047602, 0.], # [0., 0., 0.024794, 0.024794, 0., 0., 0.024794, 0.024794, 0.], # [0., 0.016690, 0.004526, 0.004526, 0.011916, # 0.016690, 0., 0.002983, 0.032173], # [0., 0.016690, 0.004526, 0.004526, 0.011916, # 0.016690, 0.002983, 0., 0.032173], # [0., 0.060304, 0.048289, 0.048289, 0.089669, 0.060304, # 0.048927, 0.048927, 0.]]) # computed_te = transfer_entropy(s_pombe, k=5, timesteps=20, local=True) # self.assertEqual((9, 9, 512, 16), computed_te.shape) # for i in range(9): # for j in range(9): # self.assertAlmostEqual( # known_te[i, j], np.mean(computed_te[i, j]), places=6) # # def test_mutual_information_not_network(self): # """ # Raise a ``TypeError`` if the provided network is not actually a network # """ # with self.assertRaises(TypeError): # mutual_information(5, timesteps=10, local=False) # with self.assertRaises(TypeError): # mutual_information(5, timesteps=10, local=True) # # def test_mutual_information_s_pombe(self): # """ # ``mutual_information`` computes the correct values for ``s_pombe`` # """ # known_mi = np.asarray( # [[0.162326, 0.013747, 0.004285, 0.004285, 0.013409, 0.015862, # 0.005170, 0.005170, 0.011028], # [0.013747, 0.566610, 0.007457, 0.007457, 0.006391, 0.327908, # 0.006761, 0.006761, 0.004683], # [0.004285, 0.007457, 0.838373, 0.475582, 0.211577, 0.004329, # 0.459025, 0.459025, 0.127557], # [0.004285, 0.007457, 0.475582, 0.838373, 0.211577, 0.004329, # 0.459025, 0.459025, 0.127557], # [0.013409, 0.006391, 0.211577, 0.211577, 0.574591, 0.007031, # 0.175608, 0.175608, 0.012334], # [0.015862, 0.327908, 0.004329, 0.004329, 0.007031, 0.519051, # 0.006211, 0.006211, 0.002607], # [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, # 0.808317, 0.493495, 0.103905], # [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, # 0.493495, 0.808317, 0.103905], # [0.011028, 0.004683, 0.127557, 0.127557, 0.012334, 0.002607, # 0.103905, 0.103905, 0.634238]]) # computed_mi = mutual_information(s_pombe, timesteps=20) # self.assertEqual(known_mi.shape, computed_mi.shape) # for got, expected in zip(computed_mi.flatten(), known_mi.flatten()): # self.assertAlmostEqual(expected, got, places=6) # # def test_local_mutual_information_s_pombe(self): # """ # local ``mutual_information`` averages to the correct values for # ``s_pombe`` # """ # known_mi = np.asarray( # [[0.162326, 0.013747, 0.004285, 0.004285, 0.013409, 0.015862, # 0.005170, 0.005170, 0.011028], # [0.013747, 0.566610, 0.007457, 0.007457, 0.006391, 0.327908, # 0.006761, 0.006761, 0.004683], # [0.004285, 0.007457, 0.838373, 0.475582, 
0.211577, 0.004329, # 0.459025, 0.459025, 0.127557], # [0.004285, 0.007457, 0.475582, 0.838373, 0.211577, 0.004329, # 0.459025, 0.459025, 0.127557], # [0.013409, 0.006391, 0.211577, 0.211577, 0.574591, 0.007031, # 0.175608, 0.175608, 0.012334], # [0.015862, 0.327908, 0.004329, 0.004329, 0.007031, 0.519051, # 0.006211, 0.006211, 0.002607], # [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, # 0.808317, 0.493495, 0.103905], # [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, # 0.493495, 0.808317, 0.103905], # [0.011028, 0.004683, 0.127557, 0.127557, 0.012334, 0.002607, # 0.103905, 0.103905, 0.634238]]) # computed_mi = mutual_information(s_pombe, timesteps=20, local=True) # self.assertEqual((9, 9, 512, 21), computed_mi.shape) # for i in range(9): # for j in range(9): # self.assertAlmostEqual( # known_mi[i, j], np.mean(computed_mi[i, j]), places=6) def test_architecture_ai(self): """ The architecture correctly computes the active information """ k, timesteps = 5, 20 arch = Architecture(s_pombe, k=k, timesteps=timesteps) expected_ai = np.asarray([0.0, 0.408344, 0.629567, 0.629567, 0.379157, 0.400462, 0.670196, 0.670196, 0.391891]) got_ai = arch.active_information() self.assertEqual(got_ai.shape, expected_ai.shape) self.assertTrue(np.allclose(got_ai, expected_ai, atol=1e-6)) got_ai = arch.active_information(local=True) self.assertEqual((9, 512, 16), got_ai.shape) self.assertTrue(np.allclose(np.mean(got_ai, axis=(1, 2)), expected_ai, atol=1e-6)) def test_architecture_er(self): """ The architecture correctly computes the entropy rate """ k, timesteps = 5, 20 arch = Architecture(s_pombe, k=k, timesteps=timesteps) expected_er = np.asarray([0.0, 0.016912, 0.072803, 0.072803, 0.058420, 0.024794, 0.032173, 0.032173, 0.089669]) got_er = arch.entropy_rate() self.assertEqual(got_er.shape, expected_er.shape) self.assertTrue(np.allclose(got_er, expected_er, atol=1e-6)) got_er = arch.entropy_rate(local=True) self.assertEqual((9, 512, 16), got_er.shape) self.assertTrue(np.allclose(np.mean(got_er, axis=(1, 2)), expected_er, atol=1e-6)) def test_architecture_te(self): """ The architecture correctly computes the transfer entropy """ k, timesteps = 5, 20 arch = Architecture(s_pombe, k=k, timesteps=timesteps) expected_te = np.asarray( [[0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 0.000000, 0.000000, 0.016912, 0.000000, 0.000000, 0.000000, 0.000000], [0.000000, 0.051370, 0.000000, 0.012225, 0.019947, 0.051370, 0.006039, 0.006039, 0.072803], [0.000000, 0.051370, 0.012225, 0.000000, 0.019947, 0.051370, 0.006039, 0.006039, 0.072803], [0.000000, 0.058420, 0.047602, 0.047602, 0.000000, 0.058420, 0.047602, 0.047602, 0.000000], [0.000000, 0.000000, 0.024794, 0.024794, 0.000000, 0.000000, 0.024794, 0.024794, 0.000000], [0.000000, 0.016690, 0.004526, 0.004526, 0.011916, 0.016690, 0.000000, 0.002983, 0.032173], [0.000000, 0.016690, 0.004526, 0.004526, 0.011916, 0.016690, 0.002983, 0.000000, 0.032173], [0.000000, 0.060304, 0.048289, 0.048289, 0.089669, 0.060304, 0.048927, 0.048927, 0.000000]]) got_te = arch.transfer_entropy() self.assertEqual(got_te.shape, expected_te.shape) self.assertTrue(np.allclose(got_te, expected_te, atol=1e-6)) got_te = arch.transfer_entropy(local=True) self.assertEqual((9, 9, 512, 16), got_te.shape) self.assertTrue(np.allclose(np.mean(got_te, axis=(2, 3)), expected_te, atol=1e-6)) def test_architecture_mi(self): """ The architecture correctly computes the mutual information """ k, timesteps = 5, 20 arch = 
Architecture(s_pombe, k=k, timesteps=timesteps) expected_mi = np.asarray( [[0.162326, 0.013747, 0.004285, 0.004285, 0.013409, 0.015862, 0.005170, 0.005170, 0.011028], [0.013747, 0.566610, 0.007457, 0.007457, 0.006391, 0.327908, 0.006761, 0.006761, 0.004683], [0.004285, 0.007457, 0.838373, 0.475582, 0.211577, 0.004329, 0.459025, 0.459025, 0.127557], [0.004285, 0.007457, 0.475582, 0.838373, 0.211577, 0.004329, 0.459025, 0.459025, 0.127557], [0.013409, 0.006391, 0.211577, 0.211577, 0.574591, 0.007031, 0.175608, 0.175608, 0.012334], [0.015862, 0.327908, 0.004329, 0.004329, 0.007031, 0.519051, 0.006211, 0.006211, 0.002607], [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, 0.808317, 0.493495, 0.103905], [0.005170, 0.006761, 0.459025, 0.459025, 0.175608, 0.006211, 0.493495, 0.808317, 0.103905], [0.011028, 0.004683, 0.127557, 0.127557, 0.012334, 0.002607, 0.103905, 0.103905, 0.634238]]) got_mi = arch.mutual_information() self.assertEqual(got_mi.shape, expected_mi.shape) self.assertTrue(np.allclose(got_mi, expected_mi, atol=1e-6)) got_mi = arch.mutual_information(local=True) self.assertEqual((9, 9, 512, 21), got_mi.shape) self.assertTrue(np.allclose(np.mean(got_mi, axis=(2, 3)), expected_mi, atol=1e-6))
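Each active test above follows the same pattern: compute a global information measure over s_pombe, compare it against known constants, then check that the mean of the local measure reproduces the same constants. A condensed, self-contained sketch of that pattern, assuming neet is installed; the numbers are the expected active-information values quoted in the test:

# Sketch of the compare-global-then-mean-of-local pattern used above.
import numpy as np
from neet.boolean.examples import s_pombe
from neet.information import Architecture

arch = Architecture(s_pombe, k=5, timesteps=20)
expected = np.asarray([0.0, 0.408344, 0.629567, 0.629567, 0.379157,
                       0.400462, 0.670196, 0.670196, 0.391891])

assert np.allclose(arch.active_information(), expected, atol=1e-6)

local = arch.active_information(local=True)         # shape (9, 512, 16)
assert np.allclose(local.mean(axis=(1, 2)), expected, atol=1e-6)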
48.159744
90
0.549224
2,022
15,074
3.997527
0.073689
0.019795
0.017073
0.014846
0.902388
0.828776
0.77397
0.764815
0.725226
0.704441
0
0.330553
0.300186
15,074
312
91
48.314103
0.435681
0.597519
0
0.211765
0
0
0
0
0
0
0
0
0.2
1
0.058824
false
0
0.047059
0
0.117647
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
615f2820e948fc9a2b7622cf13f33e797ef92afa
143
py
Python
mmseg/utils/__init__.py
wzpscott/SegformerDistillation
6558757f5071251410e90270e197755860a6f41c
[ "DOC" ]
903
2021-06-13T04:45:03.000Z
2022-03-31T13:21:50.000Z
mmseg/utils/__init__.py
zots0127/SegFormer
93301b33d7b7634b018386681be3a640f5979957
[ "DOC" ]
72
2021-06-13T13:01:49.000Z
2022-03-30T09:19:34.000Z
mmseg/utils/__init__.py
zots0127/SegFormer
93301b33d7b7634b018386681be3a640f5979957
[ "DOC" ]
140
2021-06-13T12:24:39.000Z
2022-03-31T12:57:14.000Z
from .collect_env import collect_env
from .logger import get_root_logger, print_log

__all__ = ['get_root_logger', 'collect_env', 'print_log']
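The __all__ list above pins the package's public surface, so downstream code imports these helpers from mmseg.utils rather than from the private submodules. A sketch of typical usage; the exact signatures live in mmseg/utils/logger.py and collect_env.py, which are not part of this record:

# Illustrative usage of the re-exported helpers (signatures assumed mmcv-style).
from mmseg.utils import collect_env, get_root_logger, print_log

logger = get_root_logger()
print_log(str(collect_env()), logger=logger)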
28.6
57
0.797203
22
143
4.590909
0.454545
0.29703
0.257426
0
0
0
0
0
0
0
0
0
0.104895
143
4
58
35.75
0.789063
0
0
0
0
0
0.244755
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
7
61a975eb9e30dc79b2f4fa800163b8280c435d06
57,774
py
Python
Agents/DepreciatedClasses/Minimax.py
karlflores/WatchYourBackProject
00a7c32e46ea0b75580d17ea6a22372e4a005627
[ "Unlicense" ]
null
null
null
Agents/DepreciatedClasses/Minimax.py
karlflores/WatchYourBackProject
00a7c32e46ea0b75580d17ea6a22372e4a005627
[ "Unlicense" ]
null
null
null
Agents/DepreciatedClasses/Minimax.py
karlflores/WatchYourBackProject
00a7c32e46ea0b75580d17ea6a22372e4a005627
[ "Unlicense" ]
null
null
null
''' * Implements the mini-max algorithm based on the minimax_mode structure * and the player file ''' from math import inf from Agents.Minimax_Node import Node, UndoNode from Constants import constant from DepreciatedBoard.Board import Board from Evaluation.Policies import Evaluation from copy import deepcopy from time import time, sleep ''' THIS CLASS A DEPRECIATED AND NO LONGER IS IN USE -- BROKEN CODE SWITCHED TO NEGAMAX INSTEAD OF THIS ''' class MinimaxAB(object): def __init__(self): # we want to create a node self.visited = set() ''' * Alpha Beta - Minimax Driver Function ''' def iterative_deepening_alpha_beta(self,root): MAX_ITER = 20 # default policy if len(root.available_moves) == 0: return None else: move = root.available_moves[0] # time allocated per move in ms time_alloc = 1000 # get time start_time = Minimax.curr_millisecond_time() # iterative deepening begins here for depth in range(1, MAX_ITER): print(depth) move = self.alpha_beta_minimax(depth, root) sleep(0.05) if Minimax.curr_millisecond_time() - start_time > time_alloc: break return move @staticmethod def curr_millisecond_time(): return int(time()*1000) def alpha_beta_minimax(self,depth,root): self.visited = set() # print the available mvoes of the alpha beta call #print(root.available_moves) #print("*"*20) #root.board.print_board() #print("*"*20) # generate the child nodes of the root node and run minimax on these # nodes -- choose the node that has the best value # initially the best move has not been found # essentially we just need to do a min search on the child nodes # of the root -- do this week alpha-beta pruning best_move = None alpha = -inf beta = inf evaluate = -inf child_nodes = [] for action in root.available_moves: child_nodes.append(Minimax.create_node(root.board, Board.get_opp_piece_type(root.colour),action)) child_nodes.sort(reverse=False) for child in child_nodes: # if there is a symmetry of the board state that matches a visited state # then we can skip this node -- don't need to explore it anymore if self.check_symmetry(child.board.board_state) is True: continue evaluate_min = self.min_value(child,depth-1,alpha,beta) if evaluate_min > evaluate: best_move = child.move_applied evaluate = evaluate_min alpha = max(evaluate, alpha) if beta < alpha: break return best_move # find the action associated with eval def max_value(self,node, depth, alpha, beta): evaluate = -inf if Minimax.cutoff_test(node,depth): return Minimax.evaluate_node(node) # visit each available move # print(node.available_moves) child_nodes = [] for action in node.available_moves: child_nodes.append(Minimax.create_node(node.board, Board.get_opp_piece_type(node.colour),action)) child_nodes.sort(reverse=False) for child in child_nodes: # if self.check_symmetry(child.board.board_state) is True: # continue # make a new node for each available node -- this child is now the opposite colour # child = M inimax.create_node(node.board, DepreciatedBoard.get_opp_piece_type(node.colour), action) # get the minimax value for this node evaluate = max(evaluate, self.min_value(child, depth-1, alpha, beta)) alpha = max(evaluate,alpha) if alpha >= beta: break node.minimax = evaluate return evaluate def min_value(self,node, depth, alpha, beta): # beginning evaluation value evaluate = inf if Minimax.cutoff_test(node, depth): return Minimax.evaluate_node(node) # print(node.available_moves) child_nodes = [] for action in node.available_moves: # apply the move to the child node, this node is now the opposite colour 
child_nodes.append(Minimax.create_node(node.board, Board.get_opp_piece_type(node.colour), action)) child_nodes.sort(reverse=True) for child in child_nodes: # need to check if this is valid -- skip over all previously visied symmetric nodes #if self.check_symmetry(child.board.board_state) is True: # print("xxxxxx") # continue evaluate = min(evaluate, self.max_value(child, depth-1, alpha, beta)) beta = min(beta, evaluate) if beta <= alpha: break node.minimax = evaluate return evaluate @staticmethod def create_node(board,colour,move): # colour is the colour this player with the move from the previous player applied # therefore move is the opposite colour player # create a new node object based on the board node = Node(board,colour) if node is None: return None # apply the move to the board node.move_applied = move # apply this move to the node if move is not None: node.board.update_board(move, Board.get_opp_piece_type(colour)) else: pass #print("Move is None: WTFFFFFFF") # get the available moves based on what phase the board is in if node.board.phase == constant.PLACEMENT_PHASE and node.board.move_counter == 24: node.board.phase = constant.MOVING_PHASE node.board.move_counter = 0 if node.board.phase == constant.PLACEMENT_PHASE: Minimax.update_available_nodes_placement(node) elif node.board.phase == constant.MOVING_PHASE: if node.board.move_counter == 24: node.available_moves = [] # generate the moves that you can apply to this node node.available_moves = Minimax.generate_moves(node, colour) return node @staticmethod def generate_moves(node,colour): available_moves = [] if node.board.phase == constant.MOVING_PHASE : for move in node.board.piece_pos[colour]: for move_type in range(constant.MAX_MOVETYPE): if node.board.is_legal_move(move,move_type): available_moves.append((move,move_type)) return available_moves @staticmethod def cutoff_test(node, depth): if depth == 0: return True if Minimax.is_terminal(node): return True return False @staticmethod def evaluate_node(node): return Evaluation.basic_policy(node.board,node.colour) # update the available moves of the search algorithm after it has been instantiated def update_available_moves(self, available_moves): self.node.available_moves = available_moves def update_board(self, board): self.node.board = deepcopy(board) @staticmethod def is_terminal(node): return node.is_leaf() @staticmethod def update_available_nodes_placement(node): Minimax.init_placable_area(node) for colour in (constant.BLACK_PIECE, constant.WHITE_PIECE): for piece in node.board.piece_pos[colour]: if piece in node.available_moves: node.available_moves.remove(piece) @staticmethod def init_placable_area(node): node.available_moves = [] for row in range(constant.BOARD_SIZE): for col in range(constant.BOARD_SIZE): if Board.within_starting_area((col,row),node.colour): node.available_moves.append((col,row)) def check_symmetry(self,board_state): transformation = Minimax.apply_horizontal_reflection(board_state) board = deepcopy(board_state) if transformation.decode("utf-8") in self.visited: return True else: self.visited.add(board.decode("utf-8")) return False @staticmethod def apply_horizontal_reflection(board_state): temp = '' for index in range(constant.BOARD_SIZE ** 2): temp+= constant.FREE_SPACE temp = bytearray(temp,'utf-8') for row in range(constant.BOARD_SIZE): for col in range(constant.BOARD_SIZE): Board.set_array_char(temp,7-row,7-col, Board.get_array_element(board_state,row,col)) #print(temp) #print(board_state) return temp @staticmethod def undo_move(node): 
node.board.undo_move() class MinimaxABUndo(object): def __init__(self,board): # we want to create a node self.transposition_table = set() # only use this board to complete the search # save memory self.board = deepcopy(board) # test the dictionary for the available moves # each piece has their available able self.available_actions = {constant.WHITE_PIECE: {}, constant.BLACK_PIECE: {}} # initialise the available actions self.init_available_placement_actions() # print(self.available_actions) ''' * Alpha Beta - Minimax Driver Function ''' def iterative_deepening_alpha_beta(self,root): MAX_ITER = 20 # default policy if len(root.available_moves) == 0: return None else: move = root.available_moves[0] # time allocated per move in ms time_alloc = 1000 # get time start_time = MinimaxAB.curr_millisecond_time() # iterative deepening begins here for depth in range(1, MAX_ITER): print(depth) move = self.alpha_beta_minimax(depth, root) sleep(0.05) if MinimaxAB.curr_millisecond_time() - start_time > time_alloc: break return move @staticmethod def curr_millisecond_time(): return int(time()*1000) def alpha_beta_minimax(self,depth,root): self.transposition_table = set() # print the available mvoes of the alpha beta call # print(root.available_moves) # generate the child nodes of the root node and run minimax on these # nodes -- choose the node that has the best value # initially the best move has not been found # essentially we just need to do a min search on the child nodes # of the root -- do this week alpha-beta pruning best_move = None alpha = -inf evaluate = -inf beta = inf i = 0 for action in root.available_moves: # print("{} Action AB call".format(i)) child = self.create_node(Board.get_opp_piece_type(root.colour), action) self.update_minimax_board(action, child) ''' if self.board.phase == constant.PLACEMENT_PHASE: if TranspositionTable.check_placement_sym(self.transposition_table,self.board.board_state) is True: continue elif self.board.phase == constant.MOVING_PHASE: if TranspositionTable.check_already_visited(self.transposition_table, self.board.board_state): continue ''' ab_evaluate = self.min_value(child, depth-1, alpha, beta) # print(ab_evaluate) if ab_evaluate > evaluate: best_move = child.move_applied evaluate = ab_evaluate self.undo_move() ''' if evaluate >= beta: # print(evaluate) print("AB Best Value: ",end='') print(evaluate, best_move) return best_move alpha = max(alpha, evaluate) ''' # print(best_move) # print(evaluate) # print("AB Best Value: ",end='') # print(evaluate, best_move) return best_move # find the action associated with eval ''' # get a list of the available actions when minimax is called available_actions = self.get_actions(root.colour) # loop through all available actions for action in available_actions: child = self.create_node(DepreciatedBoard.get_opp_piece_type(root.colour),action) # apply the action to the minimax board self.update_minimax_board(action,child) # update all the available actions of the minimax board self.update_available_actions(action) available_actions = self.get_actions(root.colour) print(self.available_actions) return ''' def max_value(self, node, depth, alpha, beta): evaluate = -inf if self.cutoff_test(depth): #print("MAX NODE VAL: ",end='') #val = self.evaluate_node(node) #print(val) #return val return self.evaluate_node(node.colour) # visit each available move #print("MAX MOVES: ",end='') #print(node.available_moves) for action in node.available_moves: ''' if self.board.phase == constant.MOVING_PHASE: if 
TranspositionTable.check_already_visited(self.transposition_table, self.board.board_state): continue ''' child = self.create_node(Board.get_opp_piece_type(node.colour), action) # update the board representation with the move self.update_minimax_board(action, child) # self.board.print_board() #print(node.available_moves) #print(child.available_moves) # if self.check_symmetry(child.board.board_state) is True: # continue # get the minimax value for this node evaluate = max(evaluate, self.min_value(child, depth-1, alpha, beta)) #print(self.board.action_applied) self.undo_move() ''' if alpha >= beta: #print("UNDO") break ''' if evaluate >= beta: node.minimax = evaluate # print("MAX Best Value: ",end='') # print(evaluate) return evaluate alpha = max(evaluate,alpha) # undo the move so that we can apply the next board move to evaluate minimax value #print("UNDO 2") #self.board.print_board() #self.board.print_board() # print("MAX Best Value: ",end='') # print(evaluate) node.minimax = evaluate return evaluate def min_value(self,node, depth, alpha, beta): # print("CALLED MIN") # beginning evaluation value evaluate = inf if self.cutoff_test(depth): # val = self.evaluate_node(node) # print("MIN NODE VAL: ",end='') # print(val) # return val # print(self.evaluate_node(node)) return self.evaluate_node(node.colour) # print("MIN MOVES: ",end='') # print(node.available_moves) for action in node.available_moves: if self.board.phase == constant.MOVING_PHASE: ''' if TranspositionTable.check_already_visited(self.transposition_table, self.board.board_state): continue ''' # apply the move to the child node, this node is now the opposite colour child = self.create_node(Board.get_opp_piece_type(node.colour), action) self.update_minimax_board(action, child) # self.board.print_board() # print("MIN UPDATE") # self.board.print_board() #print("\nMin Call") #self.board.print_board() evaluate = min(evaluate, self.max_value(child, depth-1, alpha, beta)) # print(action, evaluate) self.undo_move() ''' if beta <= alpha: # when we break from the loop make sure to undo the move break ''' if evaluate <= alpha: node.minimax = evaluate # print("MIN Best Value: ",end='') # print(evaluate) return evaluate beta = min(beta, evaluate) # print("MIN Best Value: ",end='') # print(evaluate) node.minimax = evaluate return evaluate def create_node(self,colour, move): # colour is the colour this player with the move from the previous player applied # therefore move is the opposite colour player # create a new node object based on the board node = UndoNode(self.board, colour) if node is None: return None # store the move applied to the board node.move_applied = move return node def update_minimax_board(self,move,node,start_node=False): # if the move is None -- this could be a forfeit of a move or it could be a start of a search # apply this move to the node if move is not None: self.board.update_board(move, Board.get_opp_piece_type(node.colour)) elif move is None and self.board.move_counter is not 0: # if the move is none and the counter is not zero this is a forfeit self.board.move_counter += 1 self.board.set_player_to_move(self.board.get_opp_piece_type(self.board.player_to_move)) # update minimax board represents the start of the minimax search then we need to initalise # the available moves #print("Move is None: WTFFFFFFF") # get the available moves based on what phase the board is in ''' if self.board.phase == constant.PLACEMENT_PHASE and self.board.move_counter == 24: self.board.phase = constant.MOVING_PHASE self.board.move_counter = 0 ''' 
# print(self.board.phase,self.board.move_counter) if self.board.phase == constant.PLACEMENT_PHASE: self.update_available_nodes_placement(node) # if the move is none then we need to initalise the available moves of the board ''' if move is None and start_node is True: # then this is the first node in the search -- initialise the placement actions if self.board.move_counter == 0 or self.board.move_counter == 1: self.start_available_actions_placement() return elif start_node is False: # this is a start node but a move has been applied, therefore we just need to update the available # moves list #print(move) self.update_available_placement(move) #print(self.available_actions) return ''' elif self.board.phase == constant.MOVING_PHASE: # generate the moves that you can apply to this node node.available_moves = self.generate_moves(node.colour) # this is the start to the search -- we don't want to update the available_moves list ''' if start_node is True and move is None: # if it is the start to the moving phase -- need to initialise self.init_available_moving_actions() return elif start_node is False: self.update_available_moves(move, node.colour) return ''' def generate_moves(self,colour): available_moves = [] if self.board.phase == constant.MOVING_PHASE : for move in self.board.piece_pos[colour]: for move_type in range(constant.MAX_MOVETYPE): if self.board.is_legal_move(move,move_type): available_moves.append((move,move_type)) return available_moves def cutoff_test(self, depth): if depth == 0: return True if self.is_terminal(self.board): return True return False ''' * NEED TO THINK ABOUT IF THIS FUNCTION JUST EVALUATES THE NODES AT THE ROOT STATE DUE TO THE UNDO MOVES -- NEED TO TEST THIS OUT SOMEHOW, because other than that the algorithm is working as intended -- Need to work out some optimisations of the algorithm though ''' def evaluate_node(self, colour): return Evaluation.basic_policy(self.board, colour) # update the available moves of the search algorithm after it has been instantiated # #def update_available_moves(self, node, available_moves): # node.available_moves = available_moves def update_board(self, board): self.board = deepcopy(board) def is_terminal(self, board): return board.is_terminal(self.board) def update_available_nodes_placement(self, node): MinimaxABUndo.init_placable_area(node) for colour in (constant.BLACK_PIECE, constant.WHITE_PIECE): for piece in self.board.piece_pos[colour]: if piece in node.available_moves: node.available_moves.remove(piece) @staticmethod def init_placable_area(node): node.available_moves = [] for row in range(constant.BOARD_SIZE): for col in range(constant.BOARD_SIZE): if Board.within_starting_area((col,row),node.colour): node.available_moves.append((col,row)) def check_symmetry(self,board_state): transformation = MinimaxABUndo.apply_horizontal_reflection(board_state) board = deepcopy(board_state) if transformation.decode("utf-8") in self.visited: return True else: self.visited.add(board.decode("utf-8")) return False @staticmethod def apply_horizontal_reflection(board_state): temp = '' for index in range(constant.BOARD_SIZE ** 2): temp+= constant.FREE_SPACE temp = bytearray(temp,'utf-8') for row in range(constant.BOARD_SIZE): for col in range(constant.BOARD_SIZE): Board.set_array_char(temp,7-row,7-col, Board.get_array_element(board_state,row,col)) #print(temp) #print(board_state) return temp def undo_move(self): self.board.undo_move() # then we need to recalculate the available moves based on the board representation #self.generate_actions() ''' 
    '''
    #################################################################################
    # METHODS FOR THE DICTIONARY REPRESENTATION OF THE AVAILABLE MOVES ON THE BOARD #
    #################################################################################
    '''

    # we update the available actions whenever we update the board representation
    def generate_actions(self):
        if self.board.phase == constant.PLACEMENT_PHASE:
            self.init_available_placement_actions()
            self.start_available_actions_placement()
        elif self.board.phase == constant.MOVING_PHASE:
            self.init_available_moving_actions()

    def init_available_placement_actions(self):
        # initialise the dictionary with the available placements on the board
        for row in range(constant.BOARD_SIZE):
            for col in range(constant.BOARD_SIZE):
                piece = col, row
                for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                    if Board.within_starting_area(piece, colour):
                        temp = {piece: constant.PLACEMENT_PHASE}
                        self.available_actions[colour].update(temp)

    def start_available_actions_placement(self):
        # remove the squares occupied by pieces already on the board
        for colour in (constant.BLACK_PIECE, constant.WHITE_PIECE):
            for piece in self.board.piece_pos[colour]:
                if piece in self.available_actions[colour]:
                    if Board.within_starting_area(piece, colour):
                        self.available_actions[colour].pop(piece)

    def init_available_moving_actions(self):
        for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            for piece in self.available_actions[colour].keys():
                self.update_actions_dict_entry(piece, colour)
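    # --- editor's note (illustrative, hypothetical values) ----------------------
    # Shape of self.available_actions for reference, inferred from the two
    # initialisers above:
    #   placement phase: {WHITE: {(0, 0): PLACEMENT_PHASE, (1, 0): ..., ...},
    #                     BLACK: {...}}        # keys are free starting squares
    #   moving phase:    {WHITE: {(3, 4): [0, 2, 5], ...}, BLACK: {...}}
    #                    # key = piece position, value = its legal move types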
    # NOTE: this must be called after every update to the minimax board representation
    def update_available_moves(self, action, colour):
        # if any pieces were eliminated last move, retrieve them from the stack --
        # but make sure not to pop them off the stack completely
        eliminated_pieces = self.board.eliminated_pieces_last_move(
            self.board.phase, self.board.move_counter, pop=False)

        # `action` is in the form (position, move_type) -- i.e. we are moving the
        # piece at `position` by `move_type`, and `colour` is the colour of the
        # piece we have moved. The move has already been applied to the board, so
        # the dictionary entries are changed accordingly:
        #   -- delete the keys of any eliminated pieces; those pieces no longer
        #      exist on the board, so they have no associated moves
        #   -- update the available moves of the pieces that can move into the
        #      squares freed by the move and by any eliminations
        #   -- update the available moves of the piece at its new location and
        #      delete the dictionary entry for its old position

        old_pos = action[0]
        new_pos = Board.convert_move_type_to_coord(old_pos, action[1])

        # remove the old position from the dictionary -- it is no longer an
        # available move
        if old_pos in self.available_actions[colour]:
            self.available_actions[colour].pop(old_pos)
        else:
            pass  # nothing to remove -- this would be the place to raise an error

        # then add an entry corresponding to the new location of the piece after
        # the move has been applied
        if new_pos not in self.available_actions[colour]:
            temp_list = self.get_piece_legal_moves(new_pos)
            temp_dict = {new_pos: temp_list}
            # update the per-colour dictionary (the original updated the outer
            # dict, which would have clobbered a colour key)
            self.available_actions[colour].update(temp_dict)
        else:
            pass  # entry already exists -- this would be the place to raise an error

        # remove all eliminated pieces from the dictionary
        for piece_type in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            for piece in eliminated_pieces[piece_type]:
                if piece in self.available_actions[piece_type]:
                    self.available_actions[piece_type].pop(piece)
                else:
                    pass  # missing entry -- this would be the place to raise an error

        # the old position is now a free square, so pieces around it (and around
        # any eliminated pieces) may have gained moves; test every position
        # surrounding each newly freed square and refresh those entries
        for move_type in range(constant.MAX_MOVETYPE):
            # iterate through all possible moves at the old location, checking
            # whether there is a piece there whose available moves need updating
            piece = Board.convert_move_type_to_coord(old_pos, move_type)
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                if piece in self.available_actions[piece_colour]:
                    if move_type < 4:
                        self.update_actions_dict_entry(piece, piece_colour)
                    else:
                        if self.board.can_jump_into_position(old_pos, move_type):
                            self.update_actions_dict_entry(piece, piece_colour)

            # update the pieces around any eliminated pieces
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                # iterate through all the eliminated pieces on the board
                for elim_piece in eliminated_pieces[piece_colour]:
                    # for each eliminated piece, apply each move_type to it and
                    # check (via the dictionary) whether a piece sits at the
                    # resulting square: a one-step neighbour just has its moves
                    # refreshed, while a jump needs an adjacent piece between the
                    # jumper and the freed square -- can_jump_into_position tells
                    # us whether a given move_type can jump into this free spot
                    piece = Board.convert_move_type_to_coord(elim_piece, move_type)

                    # if this square corresponds to an entry in the dictionary,
                    # then there is a piece at this location
                    if piece in self.available_actions[piece_colour]:
                        if move_type < 4:
                            # one-step moves
                            self.update_actions_dict_entry(piece, piece_colour)
                        else:
                            # check whether a jump into the free square is
                            # possible for the piece at the jump location
                            if self.board.can_jump_into_position(elim_piece, move_type):
                                self.update_actions_dict_entry(piece, piece_colour)

            # finally, update the pieces that surround the piece's new position --
            # that square is now occupied, so surrounding pieces can no longer
            # move (or jump) into it
            piece = Board.convert_move_type_to_coord(new_pos, move_type)
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                if piece in self.available_actions[piece_colour]:
                    if move_type < 4:
                        self.update_actions_dict_entry(piece, piece_colour)
                    else:
                        if self.board.can_jump_into_position(new_pos, move_type):
                            self.update_actions_dict_entry(piece, piece_colour)

    # helper that updates a particular piece's available actions in the dictionary
    def update_actions_dict_entry(self, piece, colour):
        temp_list = self.get_piece_legal_moves(piece)
        update_entry = {piece: temp_list}
        self.available_actions[colour].update(update_entry)

    # get a list of the legal moves of a particular piece
    def get_piece_legal_moves(self, piece):
        available_moves = []
        for move_type in range(constant.MAX_MOVETYPE):
            if self.board.is_legal_move(piece, move_type):
                available_moves.append(move_type)
        return available_moves

    def update_available_placement(self, action):
        # in the placement phase we only need to remove the placed square from the
        # dictionary and add back any squares freed by eliminated pieces
        eliminated_pieces = self.board.eliminated_pieces_last_move(
            self.board.phase, self.board.move_counter, pop=False)

        for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            if Board.within_starting_area(action, colour):
                # remove the action from the dictionary
                if action in self.available_actions[colour]:
                    self.available_actions[colour].pop(action)

            # add all the eliminated pieces to the available moves of the dictionary
            for piece in eliminated_pieces[colour]:
                if Board.within_starting_area(piece, colour):
                    if piece not in self.available_actions[colour]:
                        update_entry = {piece: constant.PLACEMENT_PHASE}
                        self.available_actions[colour].update(update_entry)

    # return a list of actions corresponding to a particular board state
    def get_actions(self, colour):
        actions = []
        if self.board.phase == constant.PLACEMENT_PHASE:
            # a list of the free squares a player can place a piece into
            for key in self.available_actions[colour].keys():
                actions.append(key)
            return actions
        elif self.board.phase == constant.MOVING_PHASE:
            # a list of (piece_position, move_type) pairs
            for key in self.available_actions[colour].keys():
                for move_type in self.available_actions[colour][key]:
                    actions.append((key, move_type))
            return actions
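# --- editor's sketch (not part of the original module) -----------------------
# A minimal, self-contained illustration of the cutoff conditions used by
# MinimaxABUndo.max_value / min_value above (`evaluate >= beta` in the max
# layer, `evaluate <= alpha` in the min layer). The "game tree" here is just a
# nested list of leaf scores, not a board position.
from math import inf as _INF

def _toy_alpha_beta(tree, alpha, beta, maximising):
    if not isinstance(tree, list):  # leaf -- return its static score
        return tree
    if maximising:
        value = -_INF
        for child in tree:
            value = max(value, _toy_alpha_beta(child, alpha, beta, False))
            if value >= beta:       # beta cutoff, as in max_value
                return value
            alpha = max(alpha, value)
        return value
    value = _INF
    for child in tree:
        value = min(value, _toy_alpha_beta(child, alpha, beta, True))
        if value <= alpha:          # alpha cutoff, as in min_value
            return value
        beta = min(beta, value)
    return value

# _toy_alpha_beta([[3, 5], [2, 9]], -_INF, _INF, True) evaluates to 3 and never
# looks at the 9 leaf -- the second subtree is cut off as soon as the 2 is seen.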
class MinimaxUndo(object):

    def __init__(self, board):
        self.visited = set()

        # complete the search on a single deep-copied board to save memory
        self.board = deepcopy(board)

        # dictionary of available actions -- each piece has its own entry
        self.available_actions = {constant.WHITE_PIECE: {}, constant.BLACK_PIECE: {}}

        # initialise the available actions
        self.init_available_placement_actions()

    '''
    * Minimax driver function (iterative deepening)
    '''
    def iterative_deepening_alpha_beta(self, root):
        MAX_ITER = 20

        # default policy
        if len(root.available_moves) == 0:
            return None
        else:
            move = root.available_moves[0]

        # time allocated per move in ms
        time_alloc = 1000

        start_time = Minimax.curr_millisecond_time()

        # iterative deepening begins here
        for depth in range(1, MAX_ITER):
            move = self.alpha_beta_minimax(depth, root)
            sleep(0.05)
            if Minimax.curr_millisecond_time() - start_time > time_alloc:
                break

        return move

    @staticmethod
    def curr_millisecond_time():
        return int(time() * 1000)

    def alpha_beta_minimax(self, depth, root):
        self.visited = set()

        # generate the child nodes of the root node, run minimax on each of them
        # and choose the node with the best value -- essentially a min search on
        # the children of the root
        best_move = None
        evaluate = -inf

        for action in root.available_moves:
            child = self.create_node(Board.get_opp_piece_type(root.colour), action)
            self.update_minimax_board(action, child)

            # skip board states we have already seen up to symmetry
            if self.check_symmetry(self.board.board_state) is True:
                # undo before skipping, otherwise the applied move would leak
                self.undo_move()
                continue

            ab_evaluate = self.min_value(child, depth - 1)
            if ab_evaluate > evaluate:
                best_move = child.move_applied
                evaluate = ab_evaluate

            self.undo_move()

        return best_move

    def max_value(self, node, depth):
        evaluate = -inf

        if self.cutoff_test(node, depth):
            return self.evaluate_node(node)

        # visit each available move
        for action in node.available_moves:
            child = self.create_node(Board.get_opp_piece_type(node.colour), action)
            # update the board representation with the move
            self.update_minimax_board(action, child)

            # get the minimax value for this node
            evaluate = max(evaluate, self.min_value(child, depth - 1))

            # undo the move so that we can apply the next move to the same board
            self.undo_move()

        node.minimax = evaluate
        return evaluate

    def min_value(self, node, depth):
        # beginning evaluation value
        evaluate = inf

        if self.cutoff_test(node, depth):
            return self.evaluate_node(node)

        for action in node.available_moves:
            # apply the move to the child node -- the child is now the opposite colour
            child = self.create_node(Board.get_opp_piece_type(node.colour), action)
            self.update_minimax_board(action, child)

            evaluate = min(evaluate, self.max_value(child, depth - 1))
            self.undo_move()

        node.minimax = evaluate
        return evaluate

    def create_node(self, colour, move):
        # colour is the colour of the player to move once `move` (made by the
        # opposite-colour player) has been applied to the board
        node = UndoNode(self.board, colour)

        # store the move applied to the board
        node.move_applied = move
        return node

    def update_minimax_board(self, move, node, start_node=False):
        # a None move is either a forfeited turn or the start of a search
        if move is not None:
            # apply this move to the board
            self.board.update_board(move, Board.get_opp_piece_type(node.colour))
        elif move is None and self.board.move_counter != 0:
            # the move is None and the counter is non-zero, so this is a forfeit
            self.board.move_counter += 1
            self.board.set_player_to_move(self.board.get_opp_piece_type(self.board.player_to_move))

        # initialise the available moves according to the phase the board is in
        if self.board.phase == constant.PLACEMENT_PHASE:
            self.update_available_nodes_placement(node)
        elif self.board.phase == constant.MOVING_PHASE:
            # generate the moves that can be applied to this node
            node.available_moves = self.generate_moves(node.colour)

    def generate_moves(self, colour):
        available_moves = []
        if self.board.phase == constant.MOVING_PHASE:
            for move in self.board.piece_pos[colour]:
                for move_type in range(constant.MAX_MOVETYPE):
                    if self.board.is_legal_move(move, move_type):
                        available_moves.append((move, move_type))
        return available_moves

    def cutoff_test(self, node, depth):
        if depth == 0:
            return True
        if self.is_terminal(node):
            return True
        return False

    def evaluate_node(self, node):
        return Evaluation.basic_policy(self.board, node.colour)

    def update_board(self, board):
        self.board = deepcopy(board)

    def is_terminal(self, node):
        return node.is_leaf(self.board)

    def update_available_nodes_placement(self, node):
        MinimaxABUndo.init_placable_area(node)
        for colour in (constant.BLACK_PIECE, constant.WHITE_PIECE):
            for piece in self.board.piece_pos[colour]:
                if piece in node.available_moves:
                    node.available_moves.remove(piece)

    @staticmethod
    def init_placable_area(node):
        node.available_moves = []
        for row in range(constant.BOARD_SIZE):
            for col in range(constant.BOARD_SIZE):
                if Board.within_starting_area((col, row), node.colour):
                    node.available_moves.append((col, row))

    def check_symmetry(self, board_state):
        transformation = MinimaxABUndo.apply_horizontal_reflection(board_state)
        board = deepcopy(board_state)
        if transformation.decode("utf-8") in self.visited:
            return True
        else:
            self.visited.add(board.decode("utf-8"))
            return False

    @staticmethod
    def apply_horizontal_reflection(board_state):
        # start from an all-free board and copy each square into its mirrored position
        temp = ''
        for index in range(constant.BOARD_SIZE ** 2):
            temp += constant.FREE_SPACE
        temp = bytearray(temp, 'utf-8')

        for row in range(constant.BOARD_SIZE):
            for col in range(constant.BOARD_SIZE):
                Board.set_array_char(temp, 7 - row, 7 - col,
                                     Board.get_array_element(board_state, row, col))
        return temp

    def undo_move(self):
        self.board.undo_move()
        # the available moves are then recalculated from the board representation

    '''
    #################################################################################
    # METHODS FOR THE DICTIONARY REPRESENTATION OF THE AVAILABLE MOVES ON THE BOARD #
    #################################################################################
    '''

    # we update the available actions whenever we update the board representation
    def generate_actions(self):
        if self.board.phase == constant.PLACEMENT_PHASE:
            self.init_available_placement_actions()
            self.start_available_actions_placement()
        elif self.board.phase == constant.MOVING_PHASE:
            self.init_available_moving_actions()

    def init_available_placement_actions(self):
        # initialise the dictionary with the available placements on the board
        for row in range(constant.BOARD_SIZE):
            for col in range(constant.BOARD_SIZE):
                piece = col, row
                for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                    if Board.within_starting_area(piece, colour):
                        temp = {piece: constant.PLACEMENT_PHASE}
                        self.available_actions[colour].update(temp)

    def start_available_actions_placement(self):
        # remove the squares occupied by pieces already on the board
        for colour in (constant.BLACK_PIECE, constant.WHITE_PIECE):
            for piece in self.board.piece_pos[colour]:
                if piece in self.available_actions[colour]:
                    if Board.within_starting_area(piece, colour):
                        self.available_actions[colour].pop(piece)

    def init_available_moving_actions(self):
        for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            for piece in self.available_actions[colour].keys():
                self.update_actions_dict_entry(piece, colour)
    # NOTE: this must be called after every update to the minimax board representation
    def update_available_moves(self, action, colour):
        # if any pieces were eliminated last move, retrieve them from the stack --
        # but make sure not to pop them off the stack completely
        eliminated_pieces = self.board.eliminated_pieces_last_move(
            self.board.phase, self.board.move_counter, pop=False)

        # `action` is in the form (position, move_type) -- i.e. we are moving the
        # piece at `position` by `move_type`, and `colour` is the colour of the
        # piece we have moved. The move has already been applied to the board, so
        # the dictionary entries are changed accordingly:
        #   -- delete the keys of any eliminated pieces; those pieces no longer
        #      exist on the board, so they have no associated moves
        #   -- update the available moves of the pieces that can move into the
        #      squares freed by the move and by any eliminations
        #   -- update the available moves of the piece at its new location and
        #      delete the dictionary entry for its old position

        old_pos = action[0]
        new_pos = Board.convert_move_type_to_coord(old_pos, action[1])

        # remove the old position from the dictionary -- it is no longer an
        # available move
        if old_pos in self.available_actions[colour]:
            self.available_actions[colour].pop(old_pos)
        else:
            pass  # nothing to remove -- this would be the place to raise an error

        # then add an entry corresponding to the new location of the piece after
        # the move has been applied
        if new_pos not in self.available_actions[colour]:
            temp_list = self.get_piece_legal_moves(new_pos)
            temp_dict = {new_pos: temp_list}
            # update the per-colour dictionary (the original updated the outer
            # dict, which would have clobbered a colour key)
            self.available_actions[colour].update(temp_dict)
        else:
            pass  # entry already exists -- this would be the place to raise an error

        # remove all eliminated pieces from the dictionary
        for piece_type in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            for piece in eliminated_pieces[piece_type]:
                if piece in self.available_actions[piece_type]:
                    self.available_actions[piece_type].pop(piece)
                else:
                    pass  # missing entry -- this would be the place to raise an error

        # the old position is now a free square, so pieces around it (and around
        # any eliminated pieces) may have gained moves; test every position
        # surrounding each newly freed square and refresh those entries
        for move_type in range(constant.MAX_MOVETYPE):
            # iterate through all possible moves at the old location, checking
            # whether there is a piece there whose available moves need updating
            piece = Board.convert_move_type_to_coord(old_pos, move_type)
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                if piece in self.available_actions[piece_colour]:
                    if move_type < 4:
                        self.update_actions_dict_entry(piece, piece_colour)
                    else:
                        if self.board.can_jump_into_position(old_pos, move_type):
                            self.update_actions_dict_entry(piece, piece_colour)

            # update the pieces around any eliminated pieces
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                # iterate through all the eliminated pieces on the board
                for elim_piece in eliminated_pieces[piece_colour]:
                    # for each eliminated piece, apply each move_type to it and
                    # check (via the dictionary) whether a piece sits at the
                    # resulting square: a one-step neighbour just has its moves
                    # refreshed, while a jump needs an adjacent piece between the
                    # jumper and the freed square -- can_jump_into_position tells
                    # us whether a given move_type can jump into this free spot
                    piece = Board.convert_move_type_to_coord(elim_piece, move_type)

                    # if this square corresponds to an entry in the dictionary,
                    # then there is a piece at this location
                    if piece in self.available_actions[piece_colour]:
                        if move_type < 4:
                            # one-step moves
                            self.update_actions_dict_entry(piece, piece_colour)
                        else:
                            # check whether a jump into the free square is
                            # possible for the piece at the jump location
                            if self.board.can_jump_into_position(elim_piece, move_type):
                                self.update_actions_dict_entry(piece, piece_colour)

            # finally, update the pieces that surround the piece's new position --
            # that square is now occupied, so surrounding pieces can no longer
            # move (or jump) into it
            piece = Board.convert_move_type_to_coord(new_pos, move_type)
            for piece_colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
                if piece in self.available_actions[piece_colour]:
                    if move_type < 4:
                        self.update_actions_dict_entry(piece, piece_colour)
                    else:
                        if self.board.can_jump_into_position(new_pos, move_type):
                            self.update_actions_dict_entry(piece, piece_colour)

    # helper that updates a particular piece's available actions in the dictionary
    def update_actions_dict_entry(self, piece, colour):
        temp_list = self.get_piece_legal_moves(piece)
        update_entry = {piece: temp_list}
        self.available_actions[colour].update(update_entry)

    # get a list of the legal moves of a particular piece
    def get_piece_legal_moves(self, piece):
        available_moves = []
        for move_type in range(constant.MAX_MOVETYPE):
            if self.board.is_legal_move(piece, move_type):
                available_moves.append(move_type)
        return available_moves

    def update_available_placement(self, action):
        # in the placement phase we only need to remove the placed square from the
        # dictionary and add back any squares freed by eliminated pieces
        eliminated_pieces = self.board.eliminated_pieces_last_move(
            self.board.phase, self.board.move_counter, pop=False)

        for colour in (constant.WHITE_PIECE, constant.BLACK_PIECE):
            if Board.within_starting_area(action, colour):
                # remove the action from the dictionary
                if action in self.available_actions[colour]:
                    self.available_actions[colour].pop(action)

            # add all the eliminated pieces to the available moves of the dictionary
            for piece in eliminated_pieces[colour]:
                if Board.within_starting_area(piece, colour):
                    if piece not in self.available_actions[colour]:
                        update_entry = {piece: constant.PLACEMENT_PHASE}
                        self.available_actions[colour].update(update_entry)

    # return a list of actions corresponding to a particular board state
    def get_actions(self, colour):
        actions = []
        if self.board.phase == constant.PLACEMENT_PHASE:
            # a list of the free squares a player can place a piece into
            for key in self.available_actions[colour].keys():
                actions.append(key)
            return actions
        elif self.board.phase == constant.MOVING_PHASE:
            # a list of (piece_position, move_type) pairs
            for key in self.available_actions[colour].keys():
                for move_type in self.available_actions[colour][key]:
                    actions.append((key, move_type))
            return actions
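# --- editor's sketch (not part of the original module) -----------------------
# The time-budgeted loop in MinimaxUndo.iterative_deepening_alpha_beta, reduced
# to its skeleton: keep deepening until the per-move budget is spent, returning
# the move found at the deepest completed iteration. `search_at_depth` is a
# hypothetical stand-in for self.alpha_beta_minimax.
from time import time as _time

def _iterative_deepening(search_at_depth, time_alloc_ms=1000, max_depth=20):
    best = None
    start = int(_time() * 1000)
    for depth in range(1, max_depth):
        best = search_at_depth(depth)            # result of the last finished depth
        if int(_time() * 1000) - start > time_alloc_ms:
            break                                # budget spent -- stop deepening
    return best

# e.g. _iterative_deepening(lambda d: d, time_alloc_ms=10) returns the depth
# reached before the 10 ms budget ran out.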
41.32618
120
0.611659
7,219
57,774
4.745948
0.050977
0.03126
0.030355
0.022766
0.939727
0.927556
0.915064
0.901521
0.889729
0.880914
0
0.003014
0.316578
57,774
1,397
121
41.355762
0.864705
0.288071
0
0.882083
0
0
0.002926
0
0
0
0
0
0
1
0.11026
false
0.01072
0.01072
0.013783
0.225115
0.026034
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
61b3e677d7cdeffac08c7a90f1585c65316d9607
15,076
py
Python
src/intercurator_metrics.py
dpavot/PGR-crowd
70767e25c31601c1e1f177105b77a8acb0153059
[ "Apache-2.0" ]
null
null
null
src/intercurator_metrics.py
dpavot/PGR-crowd
70767e25c31601c1e1f177105b77a8acb0153059
[ "Apache-2.0" ]
null
null
null
src/intercurator_metrics.py
dpavot/PGR-crowd
70767e25c31601c1e1f177105b77a8acb0153059
[ "Apache-2.0" ]
null
null
null
import csv
import krippendorff
import numpy

### EXTERNAL VARIABLES
n = 2      # curators
N = 2389   # subjects
k = 3      # categories


def fleiss_kappa(dataset_file, curators, subjects):
    """
    Compute Fleiss' kappa over the crowd annotations in a batch-results CSV.

    :param dataset_file: path to the batch results CSV
    :param curators: number of curators rating each subject
    :param subjects: total number of subjects (sentences)
    :return: Fleiss' kappa
    """
    dataset = open(dataset_file, encoding='utf-8')
    dataset_reader = csv.reader(dataset, delimiter=',')

    line_count = 0
    dict_counts = {}
    sentence_number = 1
    t_count = 0
    f_count = 0
    u_count = 0

    for row in dataset_reader:
        if line_count == 0:
            pass  # header row
        elif row and row[0] != '367O8HRHKG9KGF382XKL72J6UFOS44':
            if row[21] == '' and t_count + f_count + u_count < curators:
                # not rejected, and still collecting ratings for this subject
                if row[28].startswith('Yes'):
                    t_count += 1
                elif row[28].startswith('No'):
                    f_count += 1
                elif row[28].startswith('The'):
                    u_count += 1
            elif row[21] == '' and t_count + f_count + u_count >= curators:
                dict_counts[sentence_number] = [t_count, f_count, u_count]
                sentence_number += 1
                t_count = 0
                f_count = 0
                u_count = 0
                if row[28].startswith('Yes'):
                    t_count += 1
                elif row[28].startswith('No'):
                    f_count += 1
                elif row[28].startswith('The'):
                    u_count += 1
        elif row and row[0] == '367O8HRHKG9KGF382XKL72J6UFOS44':
            if sentence_number not in dict_counts:
                dict_counts[sentence_number] = [7, 0, 0]
                sentence_number += 1
        line_count += 1

    sum_pi = 0
    t_sum = 0
    f_sum = 0
    u_sum = 0
    for item in dict_counts.items():
        pi = (1 / (curators * (curators - 1))) * (
            item[1][0] ** 2 + item[1][1] ** 2 + item[1][2] ** 2 - curators)
        sum_pi += pi
        t_sum += item[1][0]
        f_sum += item[1][1]
        u_sum += item[1][2]

    t = t_sum / (subjects * curators)
    f = f_sum / (subjects * curators)
    u = u_sum / (subjects * curators)

    p = (1 / subjects) * sum_pi
    pe = t ** 2 + f ** 2 + u ** 2
    kappa = (p - pe) / (1 - pe)
    return kappa

# print(fleiss_kappa('data/batch_results_30.csv', n, N))
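# --- editor's sketch (not part of the original module) -----------------------
# Fleiss' kappa on a tiny hand-made count matrix, independent of the CSV layout
# parsed above. Each row is one subject; columns are per-category rating counts
# (True/False/Uncertain), every row summing to the number of raters. Useful as
# a sanity check against fleiss_kappa(); it applies the same P_i / P_e formulas.
def fleiss_kappa_from_counts(counts):
    n_subjects = len(counts)
    n_raters = sum(counts[0])
    # per-subject agreement P_i
    p_i = [(sum(c * c for c in row) - n_raters) / (n_raters * (n_raters - 1))
           for row in counts]
    p_bar = sum(p_i) / n_subjects
    # chance agreement P_e from the marginal category proportions
    totals = [sum(col) for col in zip(*counts)]
    p_e = sum((total / (n_subjects * n_raters)) ** 2 for total in totals)
    return (p_bar - p_e) / (1 - p_e)

# Two raters, three categories, four subjects:
# print(fleiss_kappa_from_counts([[2, 0, 0], [0, 2, 0], [1, 1, 0], [0, 0, 2]]))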
def krippendorff_alpha(dataset_file, curators):
    """
    Compute Krippendorff's alpha over the crowd annotations in a batch-results CSV.

    :param dataset_file: path to the batch results CSV
    :param curators: number of curators rating each subject
    :return: nominal Krippendorff's alpha
    """
    dataset = open(dataset_file, encoding='utf-8')
    dataset_reader = csv.reader(dataset, delimiter=',')

    line_count = 0
    reliability_data = []
    t_count = 0
    f_count = 0
    u_count = 0
    t = 1
    f = 2
    u = 3
    subset = []
    entered = False

    for row in dataset_reader:
        if line_count == 0:
            pass  # header row
        elif row and row[0] != '367O8HRHKG9KGF382XKL72J6UFOS44':
            if row[21] == '' and t_count + f_count + u_count < curators:
                # not rejected, and still collecting ratings for this subject
                if row[28].startswith('Yes'):
                    subset.append(t)
                    t_count += 1
                elif row[28].startswith('No'):
                    subset.append(f)
                    f_count += 1
                elif row[28].startswith('The'):
                    subset.append(u)
                    u_count += 1
            elif row[21] == '' and t_count + f_count + u_count >= curators:
                reliability_data.append(subset)
                subset = []
                t_count = 0
                f_count = 0
                u_count = 0
                if row[28].startswith('Yes'):
                    subset.append(t)
                    t_count += 1
                elif row[28].startswith('No'):
                    subset.append(f)
                    f_count += 1
                elif row[28].startswith('The'):
                    subset.append(u)
                    u_count += 1
        elif row and row[0] == '367O8HRHKG9KGF382XKL72J6UFOS44' and not entered:
            reliability_data.append([1, 1, 1, 1, 1, 1, 1])
            entered = True
        line_count += 1

    # transpose so that rows are raters and columns are subjects
    reliability_data = numpy.transpose(reliability_data)
    return krippendorff.alpha(reliability_data, level_of_measurement='nominal')

# print(krippendorff_alpha('data/batch_results_30.csv', n))


def _highlight(sentence, gene_first, gene_last, phenotype_first, phenotype_last):
    # rebuild the sentence with the gene and phenotype spans wrapped in <b> tags
    # (whichever span starts first is inserted first), then escape commas so the
    # sentence matches the key format used by the crowd task; this factors out
    # the string surgery that was duplicated in every rater loop below
    g0, g1 = int(gene_first), int(gene_last)
    p0, p1 = int(phenotype_first), int(phenotype_last)
    if g0 < p0:
        a0, a1, b0, b1 = g0, g1, p0, p1
    else:
        a0, a1, b0, b1 = p0, p1, g0, g1
    new_sentence = (sentence[:a0] + '<b>' + sentence[a0:a1] + '</b>'
                    + sentence[a1:b0] + '<b>' + sentence[b0:b1] + '</b>'
                    + sentence[b1:])
    return new_sentence.replace(',', '<span>&#44;</span>')


def expert_extra_kappa(external_rater_file, expert_rater_file, curators, subjects):
    # Fleiss' kappa between one external rater and one expert rater, keyed on
    # the highlighted sentence so the two TSV files can be aligned
    dict_counts = {}

    external_rater = open(external_rater_file, encoding='utf-8')
    external_rater_reader = csv.reader(external_rater, delimiter='\t')
    line_count = 0
    for external_row in external_rater_reader:
        t_count = 0
        f_count = 0
        u_count = 0
        if line_count == 0:
            pass  # header row
        else:
            grading = external_row[11]
            sentence = _highlight(external_row[1], external_row[6], external_row[7],
                                  external_row[8], external_row[9])
            if grading == 'C':
                t_count = 1
            elif grading == 'I':
                f_count = 1
            elif grading == 'U':
                u_count = 1
            dict_counts[sentence] = [t_count, f_count, u_count]
        line_count += 1

    expert_rater = open(expert_rater_file, encoding='utf-8')
    expert_rater_reader = csv.reader(expert_rater, delimiter='\t')
    line_count = 0
    for external_row in expert_rater_reader:
        if line_count == 0:
            pass  # header row
        else:
            grading = external_row[11].capitalize()
            sentence = _highlight(external_row[1], external_row[6], external_row[7],
                                  external_row[8], external_row[9])
            if grading == 'C':
                dict_counts[sentence][0] += 1
            elif grading == 'I':
                dict_counts[sentence][1] += 1
            elif grading.startswith('U'):
                dict_counts[sentence][2] += 1
        line_count += 1

    sum_pi = 0
    t_sum = 0
    f_sum = 0
    u_sum = 0
    for item in dict_counts.items():
        pi = (1 / (curators * (curators - 1))) * (
            item[1][0] ** 2 + item[1][1] ** 2 + item[1][2] ** 2 - curators)
        sum_pi += pi
        t_sum += item[1][0]
        f_sum += item[1][1]
        u_sum += item[1][2]

    t = t_sum / (subjects * curators)
    f = f_sum / (subjects * curators)
    u = u_sum / (subjects * curators)

    p = (1 / subjects) * sum_pi
    pe = t ** 2 + f ** 2 + u ** 2
    kappa = (p - pe) / (1 - pe)
    return kappa

print(expert_extra_kappa('data/external_rater_results.tsv', 'data/expert.tsv', n, N))
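# --- editor's sketch (not part of the original module) -----------------------
# Direct use of the krippendorff package on a tiny hand-made reliability matrix
# (one row per rater, one column per subject), mirroring the transposed
# structure that krippendorff_alpha() above and expert_extra_krip() below build
# from the real files.
# ratings = [[1, 2, 3, 1],   # rater A
#            [1, 2, 3, 2]]   # rater B
# print(krippendorff.alpha(reliability_data=ratings,
#                          level_of_measurement='nominal'))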
def expert_extra_krip(external_rater_file, expert_rater_file):
    # Krippendorff's alpha between the expert rater and the external rater,
    # again keyed on the highlighted sentence
    data = {}
    t = 1
    f = 2
    u = 3

    expert_rater = open(expert_rater_file, encoding='utf-8')
    expert_rater_reader = csv.reader(expert_rater, delimiter='\t')
    line_count = 0
    for external_row in expert_rater_reader:
        if line_count == 0:
            pass  # header row
        else:
            grading = external_row[11].capitalize()
            sentence = _highlight(external_row[1], external_row[6], external_row[7],
                                  external_row[8], external_row[9])
            if grading == 'C':
                data[sentence] = [t]
            elif grading == 'I':
                data[sentence] = [f]
            elif grading.startswith('U'):
                data[sentence] = [u]
        line_count += 1

    external_rater = open(external_rater_file, encoding='utf-8')
    external_rater_reader = csv.reader(external_rater, delimiter='\t')
    line_count = 0
    for external_row in external_rater_reader:
        if line_count == 0:
            pass  # header row
        else:
            grading = external_row[11]
            sentence = _highlight(external_row[1], external_row[6], external_row[7],
                                  external_row[8], external_row[9])
            if grading == 'C':
                data[sentence].append(t)
            elif grading == 'I':
                data[sentence].append(f)
            elif grading == 'U':
                data[sentence].append(u)
        line_count += 1

    reliability_data = []
    for item in data.items():
        reliability_data.append(item[1])

    # transpose so that rows are raters and columns are subjects
    reliability_data = numpy.transpose(reliability_data)
    external_rater.close()
    return krippendorff.alpha(reliability_data, level_of_measurement='nominal')

print(expert_extra_krip('data/external_rater_results.tsv', 'data/expert.tsv'))
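# --- editor's sketch (not part of the original module) -----------------------
# Quick self-consistency checks for the statistics above: chance-corrected
# agreement should hit 1.0 on unanimous data, as long as more than one category
# appears overall so the chance term stays below 1.
# assert krippendorff.alpha(reliability_data=[[1, 2], [1, 2]],
#                           level_of_measurement='nominal') == 1.0
# assert fleiss_kappa_from_counts([[2, 0, 0], [0, 0, 2]]) == 1.0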
38.167089
129
0.446936
1,535
15,076
4.149186
0.067752
0.110535
0.075365
0.094206
0.870309
0.855864
0.822421
0.795572
0.774062
0.755849
0
0.03838
0.440037
15,076
395
130
38.167089
0.716063
0.033033
0
0.864112
0
0
0.034314
0.01254
0
0
0
0
0
1
0.013937
false
0.020906
0.010453
0
0.038328
0.006969
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
9c96c8704cb8c2ac9d48edd9866c16421d7daf95
4,985
py
Python
cuchem/tests/test_cluster_wf.py
dorukozturk/cheminformatics
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
[ "Apache-2.0" ]
null
null
null
cuchem/tests/test_cluster_wf.py
dorukozturk/cheminformatics
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
[ "Apache-2.0" ]
null
null
null
cuchem/tests/test_cluster_wf.py
dorukozturk/cheminformatics
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
[ "Apache-2.0" ]
null
null
null
import cudf
import logging

from tests.utils import _fetch_chembl_test_dataset, _create_context
# from cuchem.wf.cluster.cpukmeansumap import CpuKmeansUmap
from cuchem.wf.cluster.gpukmeansumap import GpuKmeansUmap, GpuKmeansUmapHybrid
from cuchem.wf.cluster.gpurandomprojection import GpuWorkflowRandomProjection
from cuchemcommon.data.helper.chembldata import ChEmblData

logger = logging.getLogger(__name__)


# def test_cpukmeansumap():
#     """
#     Verify fetching data from chemblDB when the input is a pandas df.
#     """
#     context = _create_context()
#     n_molecules, dao, mol_df = _fetch_chembl_test_dataset(n_molecules=10000)
#     logger.info(context.batch_size)
#     wf = CpuKmeansUmap(n_molecules=n_molecules, dao=dao, n_pca=64)
#     embedding = wf.cluster(df_molecular_embedding=mol_df)
#     logger.info(embedding.head())


def test_random_proj():
    """
    Verify fetching data from chemblDB when the input is a pandas df.
    """
    _create_context()
    n_molecules, dao, mol_df = _fetch_chembl_test_dataset()

    wf = GpuWorkflowRandomProjection(n_molecules=n_molecules, dao=dao)
    wf.cluster(df_mol_embedding=mol_df)


def test_gpukmeansumap_dask():
    """
    Verify fetching data from chemblDB when the input is a pandas df.
    """
    _create_context()
    n_molecules, dao, mol_df = _fetch_chembl_test_dataset()

    wf = GpuKmeansUmap(n_molecules=n_molecules, dao=dao, pca_comps=64)
    wf.cluster(df_mol_embedding=mol_df)


def test_gpukmeansumap_cudf():
    """
    Verify fetching data from chemblDB when the input is a cudf df.
    """
    _create_context()
    n_molecules, dao, mol_df = _fetch_chembl_test_dataset()

    wf = GpuKmeansUmap(n_molecules=n_molecules, dao=dao, pca_comps=64)
    mol_df = mol_df.compute()
    wf.cluster(df_mol_embedding=mol_df)


def test_add_molecule_GpuKmeansUmap():
    """
    Verify fetching data from chemblDB when the input is a cudf df.
    """
    _create_context()
    n_molecules, dao, mol_df = _fetch_chembl_test_dataset()

    if hasattr(mol_df, 'compute'):
        mol_df = mol_df.compute()
    mol_df = cudf.from_pandas(mol_df)
    n_molecules = mol_df.shape[0]

    # the test set should contain both available and new molecules
    test_mol = mol_df[n_molecules - 20:]
    mols_tobe_added = test_mol['id'].to_array().tolist()

    chData = ChEmblData()
    logger.info('Fetching ChEMBL ids for %s', mols_tobe_added)
    mols_tobe_added = [str(row[0])
                       for row in chData.fetch_chemblId_by_molregno(mols_tobe_added)]
    logger.info('ChEMBL ids to be added %s', mols_tobe_added)

    # molecules to be used for clustering
    mol_df = mol_df[:n_molecules - 10]

    wf = GpuKmeansUmap(n_molecules=n_molecules, dao=dao, pca_comps=64)
    wf.cluster(df_mol_embedding=mol_df)

    missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
    assert len(missing_mols) == 10, \
        'Expected 10 missing molecules found %d' % len(missing_mols)

    # TODO: Once the issue with add_molecule in a multi-gpu env. is fixed, the
    # number of missing molregnos found should be 0
    missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added)
    assert len(missing_mols) == 0, \
        'Expected no missing molecules found %d' % len(missing_mols)
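# --- editor's sketch (hypothetical helper, not part of the original tests) ---
# test_add_molecule_GpuKmeansUmap (above) and test_add_molecule_hybrid_wf
# (below) repeat the same normalisation dance; it could live in one helper.
# `compute()` materialises a lazy dask dataframe; `cudf.from_pandas` is the
# standard pandas -> cudf conversion.
def _materialise_to_cudf(mol_df):
    """Collapse a (possibly lazy) dataframe to an in-GPU-memory cudf frame."""
    if hasattr(mol_df, 'compute'):
        mol_df = mol_df.compute()
    return cudf.from_pandas(mol_df)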
""" _create_context() n_molecules, dao, mol_df = _fetch_chembl_test_dataset() if hasattr(mol_df, 'compute'): mol_df = mol_df.compute() mol_df = cudf.from_pandas(mol_df) n_molecules = mol_df.shape[0] # test mol should container aviable and new molecules test_mol = mol_df[n_molecules - 20:] mols_tobe_added = test_mol['id'].to_array().tolist() chData = ChEmblData() logger.info('Fetching ChEMBLLE id for %s', mols_tobe_added) mols_tobe_added = [str(row[0]) for row in chData.fetch_chemblId_by_molregno(mols_tobe_added)] logger.info('ChEMBL ids to be added %s', mols_tobe_added) # Molecules to be used for clustering mol_df = mol_df[:n_molecules - 10] wf = GpuKmeansUmapHybrid(n_molecules=n_molecules, dao=dao, pca_comps=64) wf.cluster(df_mol_embedding=mol_df) missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added) assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols) # TODO: Once the issue with add_molecule in multi-gpu env. is fixed, the # number of missing_molregno found should be 0 missing_mols, molregnos, df_embedding = wf.add_molecules(mols_tobe_added) assert len(missing_mols) == 0, 'Expected no missing molecules found %d' % len(missing_mols) # assert len(missing_mols) == 10, 'Expected 10 missing molecules found %d' % len(missing_mols)
34.618056
98
0.712136
707
4,985
4.731259
0.154173
0.047833
0.05441
0.046039
0.813453
0.808371
0.792825
0.792825
0.792825
0.782362
0
0.011721
0.195587
4,985
143
99
34.86014
0.822444
0.283852
0
0.757576
0
0
0.078894
0
0
0
0
0.013986
0.060606
1
0.075758
false
0
0.090909
0
0.166667
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
8